Refactoring of SessionService into a component part 3.
[chromium-blink-merge.git] / tools / valgrind / chrome_tests.py
blobe4f62a5d865a09d99a801441d82926e8f62b74a0
1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 ''' Runs various chrome tests through valgrind_test.py.'''
8 import glob
9 import logging
10 import multiprocessing
11 import optparse
12 import os
13 import stat
14 import subprocess
15 import sys
17 import logging_utils
18 import path_utils
20 import common
21 import valgrind_test
# --- Exceptions raised while configuring or locating the tests to run. ---

# The --test name (or alias) is not present in ChromeTests._test_list.
class TestNotFound(Exception): pass

# --gtest_filter was combined with a conflicting "test:filter" --test value.
class MultipleGTestFiltersSpecified(Exception): pass

# No build directory was found automatically and none was given.
class BuildDirNotFound(Exception): pass

# More than one candidate build directory exists; the user must pick one.
class BuildDirAmbiguous(Exception): pass

# The requested test executable does not exist in the build directory.
class ExecutableNotFound(Exception): pass

# The executable is unsuitable for this tool (e.g. ASan-instrumented).
class BadBinary(Exception): pass
class ChromeTests:
  # Tools that run tests slowly; the plain ".gtest.txt" exclude files are
  # applied only when one of these is selected (see _AppendGtestFilter).
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  # Default number of layout tests run per chunk by TestLayout.
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
39 def __init__(self, options, args, test):
40 if ':' in test:
41 (self._test, self._gtest_filter) = test.split(':', 1)
42 else:
43 self._test = test
44 self._gtest_filter = options.gtest_filter
46 if self._test not in self._test_list:
47 raise TestNotFound("Unknown test: %s" % test)
49 if options.gtest_filter and options.gtest_filter != self._gtest_filter:
50 raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
51 "and --test %s" % test)
53 self._options = options
54 self._args = args
56 script_dir = path_utils.ScriptDir()
57 # Compute the top of the tree (the "source dir") from the script dir (where
58 # this script lives). We assume that the script dir is in tools/valgrind/
59 # relative to the top of the tree.
60 self._source_dir = os.path.dirname(os.path.dirname(script_dir))
61 # since this path is used for string matching, make sure it's always
62 # an absolute Unix-style path
63 self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
64 valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
65 self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
67 if not self._options.build_dir:
68 dirs = [
69 os.path.join(self._source_dir, "xcodebuild", "Debug"),
70 os.path.join(self._source_dir, "out", "Debug"),
71 os.path.join(self._source_dir, "build", "Debug"),
73 build_dir = [d for d in dirs if os.path.isdir(d)]
74 if len(build_dir) > 1:
75 raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
76 "%s\nPlease specify just one "
77 "using --build-dir" % ", ".join(build_dir))
78 elif build_dir:
79 self._options.build_dir = build_dir[0]
80 else:
81 self._options.build_dir = None
83 if self._options.build_dir:
84 build_dir = os.path.abspath(self._options.build_dir)
85 self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
87 def _EnsureBuildDirFound(self):
88 if not self._options.build_dir:
89 raise BuildDirNotFound("Oops, couldn't find a build dir, please "
90 "specify it manually using --build-dir")
92 def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
93 '''Generates the default command array that most tests will use.'''
94 if exe and common.IsWindows():
95 exe += '.exe'
97 cmd = list(self._command_preamble)
99 # Find all suppressions matching the following pattern:
100 # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
101 # and list them with --suppressions= prefix.
102 script_dir = path_utils.ScriptDir()
103 tool_name = tool.ToolName();
104 suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
105 if os.path.exists(suppression_file):
106 cmd.append("--suppressions=%s" % suppression_file)
107 # Platform-specific suppression
108 for platform in common.PlatformNames():
109 platform_suppression_file = \
110 os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
111 if os.path.exists(platform_suppression_file):
112 cmd.append("--suppressions=%s" % platform_suppression_file)
114 if self._options.valgrind_tool_flags:
115 cmd += self._options.valgrind_tool_flags.split(" ")
116 if self._options.keep_logs:
117 cmd += ["--keep_logs"]
118 if valgrind_test_args != None:
119 for arg in valgrind_test_args:
120 cmd.append(arg)
121 if exe:
122 self._EnsureBuildDirFound()
123 exe_path = os.path.join(self._options.build_dir, exe)
124 if not os.path.exists(exe_path):
125 raise ExecutableNotFound("Couldn't find '%s'" % exe_path)
127 # Make sure we don't try to test ASan-built binaries
128 # with other dynamic instrumentation-based tools.
129 # TODO(timurrrr): also check TSan and MSan?
130 # `nm` might not be available, so use try-except.
131 try:
132 nm_output = subprocess.check_output(["nm", exe_path])
133 if nm_output.find("__asan_init") != -1:
134 raise BadBinary("You're trying to run an executable instrumented "
135 "with AddressSanitizer under %s. Please provide "
136 "an uninstrumented executable." % tool_name)
137 except OSError:
138 pass
140 cmd.append(exe_path)
141 # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
142 # so we can find the slowpokes.
143 cmd.append("--gtest_print_time")
144 # Built-in test launcher for gtest-based executables runs tests using
145 # multiple process by default. Force the single-process mode back.
146 cmd.append("--single-process-tests")
147 if self._options.gtest_repeat:
148 cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
149 if self._options.gtest_shuffle:
150 cmd.append("--gtest_shuffle")
151 if self._options.brave_new_test_launcher:
152 cmd.append("--brave-new-test-launcher")
153 if self._options.test_launcher_bot_mode:
154 cmd.append("--test-launcher-bot-mode")
155 return cmd
157 def Run(self):
158 ''' Runs the test specified by command-line argument --test '''
159 logging.info("running test %s" % (self._test))
160 return self._test_list[self._test](self)
162 def _AppendGtestFilter(self, tool, name, cmd):
163 '''Append an appropriate --gtest_filter flag to the googletest binary
164 invocation.
165 If the user passed his own filter mentioning only one test, just use it.
166 Othewise, filter out tests listed in the appropriate gtest_exclude files.
168 if (self._gtest_filter and
169 ":" not in self._gtest_filter and
170 "?" not in self._gtest_filter and
171 "*" not in self._gtest_filter):
172 cmd.append("--gtest_filter=%s" % self._gtest_filter)
173 return
175 filters = []
176 gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
178 gtest_filter_files = [
179 os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
180 # Use ".gtest.txt" files only for slow tools, as they now contain
181 # Valgrind- and Dr.Memory-specific filters.
182 # TODO(glider): rename the files to ".gtest_slow.txt"
183 if tool.ToolName() in ChromeTests.SLOW_TOOLS:
184 gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
185 for platform_suffix in common.PlatformNames():
186 gtest_filter_files += [
187 os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
188 os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
189 (tool.ToolName(), platform_suffix))]
190 logging.info("Reading gtest exclude filter files:")
191 for filename in gtest_filter_files:
192 # strip the leading absolute path (may be very long on the bot)
193 # and the following / or \.
194 readable_filename = filename.replace("\\", "/") # '\' on Windows
195 readable_filename = readable_filename.replace(self._source_dir, "")[1:]
196 if not os.path.exists(filename):
197 logging.info(" \"%s\" - not found" % readable_filename)
198 continue
199 logging.info(" \"%s\" - OK" % readable_filename)
200 f = open(filename, 'r')
201 for line in f.readlines():
202 if line.startswith("#") or line.startswith("//") or line.isspace():
203 continue
204 line = line.rstrip()
205 test_prefixes = ["FLAKY", "FAILS"]
206 for p in test_prefixes:
207 # Strip prefixes from the test names.
208 line = line.replace(".%s_" % p, ".")
209 # Exclude the original test name.
210 filters.append(line)
211 if line[-2:] != ".*":
212 # List all possible prefixes if line doesn't end with ".*".
213 for p in test_prefixes:
214 filters.append(line.replace(".", ".%s_" % p))
215 # Get rid of duplicates.
216 filters = set(filters)
217 gtest_filter = self._gtest_filter
218 if len(filters):
219 if gtest_filter:
220 gtest_filter += ":"
221 if gtest_filter.find("-") < 0:
222 gtest_filter += "-"
223 else:
224 gtest_filter = "-"
225 gtest_filter += ":".join(filters)
226 if gtest_filter:
227 cmd.append("--gtest_filter=%s" % gtest_filter)
  @staticmethod
  def ShowTests():
    """Prints every known test name, grouping aliases of the same test.

    NOTE: Python 2 only (print statements, dict.iteritems).
    """
    # Map each test function to all names/aliases registered for it.
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    # The shortest registered name is treated as canonical; the rest are
    # shown as aliases.
    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=lambda name: len(name))
      name_to_aliases[names[0]] = names[1:]

    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print " {} (aka {})".format(name, ', '.join(aliases))
      else:
        print " {}".format(name)
249 def SetupLdPath(self, requires_build_dir):
250 if requires_build_dir:
251 self._EnsureBuildDirFound()
252 elif not self._options.build_dir:
253 return
255 # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
256 if (os.getenv("LD_LIBRARY_PATH")):
257 os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
258 self._options.build_dir))
259 else:
260 os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
262 def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
263 tool = valgrind_test.CreateTool(self._options.valgrind_tool)
264 cmd = self._DefaultCommand(tool, name, valgrind_test_args)
265 self._AppendGtestFilter(tool, name, cmd)
266 cmd.extend(['--test-tiny-timeout=1000'])
267 if cmd_args:
268 cmd.extend(cmd_args)
270 self.SetupLdPath(True)
271 return tool.Run(cmd, module)
273 def RunCmdLine(self):
274 tool = valgrind_test.CreateTool(self._options.valgrind_tool)
275 cmd = self._DefaultCommand(tool, None, self._args)
276 self.SetupLdPath(False)
277 return tool.Run(cmd, None)
  # --- Simple gtest suites. -------------------------------------------------
  # Each wrapper maps a suite to SimpleTest(module, binary); the module name
  # selects which gtest_exclude/ filter files apply to the suite.

  def TestAccessibility(self):
    return self.SimpleTest("accessibility", "accessibility_unittests")

  def TestAddressInput(self):
    return self.SimpleTest("addressinput", "libaddressinput_unittests")

  def TestAngle(self):
    return self.SimpleTest("angle", "angle_unittests")

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAshShell(self):
    return self.SimpleTest("ash_shelf", "ash_shell_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestBlinkHeap(self):
    return self.SimpleTest("blink_heap", "blink_heap_unittests")

  def TestBlinkPlatform(self):
    return self.SimpleTest("blink_platform", "blink_platform_unittests")

  def TestCacheInvalidation(self):
    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")

  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")

  def TestCC(self):
    return self.SimpleTest("cc", "cc_unittests")

  def TestChromeApp(self):
    return self.SimpleTest("chrome_app", "chrome_app_unittests")

  def TestChromeElf(self):
    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")

  def TestChromeDriver(self):
    return self.SimpleTest("chromedriver", "chromedriver_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestCloudPrint(self):
    return self.SimpleTest("cloud_print", "cloud_print_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestDisplay(self):
    return self.SimpleTest("display", "display_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestExtensions(self):
    return self.SimpleTest("extensions", "extensions_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")

  def TestGfx(self):
    return self.SimpleTest("gfx", "gfx_unittests")

  def TestGin(self):
    return self.SimpleTest("gin", "gin_unittests")

  def TestGoogleApis(self):
    return self.SimpleTest("google_apis", "google_apis_unittests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    # IPC tests spawn child processes; tell the tool to follow them.
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestInstallerUtil(self):
    return self.SimpleTest("installer_util", "installer_util_unittests")

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestKeyboard(self):
    return self.SimpleTest("keyboard", "keyboard_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestMojoCommon(self):
    return self.SimpleTest("mojo_common", "mojo_common_unittests")

  def TestMojoPublicBindings(self):
    return self.SimpleTest("mojo_public_bindings",
                           "mojo_public_bindings_unittests")

  def TestMojoPublicEnv(self):
    return self.SimpleTest("mojo_public_env",
                           "mojo_public_environment_unittests")

  def TestMojoPublicSystem(self):
    return self.SimpleTest("mojo_public_system",
                           "mojo_public_system_unittests")

  def TestMojoPublicSysPerf(self):
    return self.SimpleTest("mojo_public_sysperf",
                           "mojo_public_system_perftests")

  def TestMojoPublicUtility(self):
    return self.SimpleTest("mojo_public_utility",
                           "mojo_public_utility_unittests")

  def TestMojoSystem(self):
    return self.SimpleTest("mojo_system", "mojo_system_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPhoneNumber(self):
    return self.SimpleTest("phonenumber", "libphonenumber_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    # Remoting tests need longer UI-action timeouts (milliseconds).
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")
456 def TestUnit(self):
457 # http://crbug.com/51716
458 # Disabling all unit tests
459 # Problems reappeared after r119922
460 if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
461 logging.warning("unit_tests are disabled for memcheck on MacOS.")
462 return 0;
463 return self.SimpleTest("chrome", "unit_tests")
  # More simple gtest suites run via SimpleTest (see comment above).

  def TestUIBaseUnit(self):
    return self.SimpleTest("chrome", "ui_base_unittests")

  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")
  # Shared flag sets for the UI/browser-level suites below.
  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]
  # --- Browser-level suites; these use the longer timeout sets above. ------

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
  def TestLayoutChunk(self, chunk_num, chunk_size):
    """Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    list of layout tests, wrapping around to the beginning of the list at the
    end.

    If chunk_size is zero, runs all tests in the list once.  If a text file is
    given as argument, it is used as the list of tests.
    """
    assert((chunk_size == 0) != (len(self._args) == 0))
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run-webkits-tests commandline.
    # Store each chunk in its own directory so that we can find the data later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
                          "Scripts", "run-webkit-tests")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes.  Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  # run a separate DumpRenderTree for each test
                  "--batch-size=1",
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=800000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run-webkit-tests.  We aren't passed it directly,
    # so parse it out of build_dir.  run-webkit-tests can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if (chunk_size > 0):
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # If the arg is a txt file, then treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.extend(["--"])
    cmd.extend(script_cmd)

    # Layout tests often times fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds=120
    else:
      min_runtime_in_seconds=0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret
595 def TestLayout(self):
596 # A "chunk file" is maintained in the local directory so that each test
597 # runs a slice of the layout tests of size chunk_size that increments with
598 # each run. Since tests can be added and removed from the layout tests at
599 # any time, this is not going to give exact coverage, but it will allow us
600 # to continuously run small slices of the layout tests under valgrind rather
601 # than having to run all of them in one shot.
602 chunk_size = self._options.num_tests
603 if chunk_size == 0 or len(self._args):
604 return self.TestLayoutChunk(0, 0)
605 chunk_num = 0
606 chunk_file = os.path.join("valgrind_layout_chunk.txt")
607 logging.info("Reading state from " + chunk_file)
608 try:
609 f = open(chunk_file)
610 if f:
611 chunk_str = f.read()
612 if len(chunk_str):
613 chunk_num = int(chunk_str)
614 # This should be enough so that we have a couple of complete runs
615 # of test data stored in the archive (although note that when we loop
616 # that we almost guaranteed won't be at the end of the test list)
617 if chunk_num > 10000:
618 chunk_num = 0
619 f.close()
620 except IOError, (errno, strerror):
621 logging.error("error reading from file %s (%d, %s)" % (chunk_file,
622 errno, strerror))
623 # Save the new chunk size before running the tests. Otherwise if a
624 # particular chunk hangs the bot, the chunk number will never get
625 # incremented and the bot will be wedged.
626 logging.info("Saving state to " + chunk_file)
627 try:
628 f = open(chunk_file, "w")
629 chunk_num += 1
630 f.write("%d" % chunk_num)
631 f.close()
632 except IOError, (errno, strerror):
633 logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
634 strerror))
635 # Since we're running small chunks of the layout tests, it's important to
636 # mark the ones that have errors in them. These won't be visible in the
637 # summary list for long, but will be useful for someone reviewing this bot.
638 return self.TestLayoutChunk(chunk_num, chunk_size)
640 # The known list of tests.
641 # Recognise the original abbreviations as well as full executable names.
642 _test_list = {
643 "cmdline" : RunCmdLine,
644 "addressinput": TestAddressInput,
645 "libaddressinput_unittests": TestAddressInput,
646 "accessibility": TestAccessibility,
647 "angle": TestAngle, "angle_unittests": TestAngle,
648 "app_list": TestAppList, "app_list_unittests": TestAppList,
649 "ash": TestAsh, "ash_unittests": TestAsh,
650 "ash_shell": TestAshShell, "ash_shell_unittests": TestAshShell,
651 "aura": TestAura, "aura_unittests": TestAura,
652 "base": TestBase, "base_unittests": TestBase,
653 "blink_heap": TestBlinkHeap,
654 "blink_platform": TestBlinkPlatform,
655 "browser": TestBrowser, "browser_tests": TestBrowser,
656 "cacheinvalidation": TestCacheInvalidation,
657 "cacheinvalidation_unittests": TestCacheInvalidation,
658 "cast": TestCast, "cast_unittests": TestCast,
659 "cc": TestCC, "cc_unittests": TestCC,
660 "chrome_app": TestChromeApp,
661 "chrome_elf": TestChromeElf,
662 "chromedriver": TestChromeDriver,
663 "chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
664 "cloud_print": TestCloudPrint,
665 "cloud_print_unittests": TestCloudPrint,
666 "components": TestComponents,"components_unittests": TestComponents,
667 "compositor": TestCompositor,"compositor_unittests": TestCompositor,
668 "content": TestContent, "content_unittests": TestContent,
669 "content_browsertests": TestContentBrowser,
670 "courgette": TestCourgette, "courgette_unittests": TestCourgette,
671 "crypto": TestCrypto, "crypto_unittests": TestCrypto,
672 "device": TestDevice, "device_unittests": TestDevice,
673 "display": TestDisplay, "display_unittests": TestDisplay,
674 "events": TestEvents, "events_unittests": TestEvents,
675 "extensions": TestExtensions, "extensions_unittests": TestExtensions,
676 "ffmpeg": TestFFmpeg, "ffmpeg_unittests": TestFFmpeg,
677 "ffmpeg_regression_tests": TestFFmpegRegressions,
678 "gcm": TestGCM, "gcm_unit_tests": TestGCM,
679 "gin": TestGin, "gin_unittests": TestGin,
680 "gfx": TestGfx, "gfx_unittests": TestGfx,
681 "google_apis": TestGoogleApis,
682 "gpu": TestGPU, "gpu_unittests": TestGPU,
683 "ipc": TestIpc, "ipc_tests": TestIpc,
684 "installer_util": TestInstallerUtil,
685 "interactive_ui": TestInteractiveUI,
686 "jingle": TestJingle, "jingle_unittests": TestJingle,
687 "keyboard": TestKeyboard, "keyboard_unittests": TestKeyboard,
688 "layout": TestLayout, "layout_tests": TestLayout,
689 "media": TestMedia, "media_unittests": TestMedia,
690 "message_center": TestMessageCenter,
691 "message_center_unittests" : TestMessageCenter,
692 "mojo_common": TestMojoCommon,
693 "mojo_system": TestMojoSystem,
694 "mojo_public_system": TestMojoPublicSystem,
695 "mojo_public_utility": TestMojoPublicUtility,
696 "mojo_public_bindings": TestMojoPublicBindings,
697 "mojo_public_env": TestMojoPublicEnv,
698 "mojo_public_sysperf": TestMojoPublicSysPerf,
699 "net": TestNet, "net_unittests": TestNet,
700 "net_perf": TestNetPerf, "net_perftests": TestNetPerf,
701 "phonenumber": TestPhoneNumber,
702 "libphonenumber_unittests": TestPhoneNumber,
703 "ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
704 "printing": TestPrinting, "printing_unittests": TestPrinting,
705 "remoting": TestRemoting, "remoting_unittests": TestRemoting,
706 "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
707 "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
708 "sql": TestSql, "sql_unittests": TestSql,
709 "sync": TestSync, "sync_unit_tests": TestSync,
710 "sync_integration_tests": TestSyncIntegration,
711 "sync_integration": TestSyncIntegration,
712 "ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
713 "ui_unit": TestUIUnit, "ui_unittests": TestUIUnit,
714 "unit": TestUnit, "unit_tests": TestUnit,
715 "url": TestURL, "url_unittests": TestURL,
716 "views": TestViews, "views_unittests": TestViews,
717 "webkit": TestLayout,
def _main():
  """Parses the command line and runs each requested test.

  Returns the exit code for sys.exit(): 0 on success, otherwise the first
  non-zero result returned by a test.
  """
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")

  parser.add_option("--help-tests", dest="help_tests", action="store_true",
                    default=False, help="List all available tests")
  parser.add_option("-b", "--build-dir",
                    help="the location of the compiler output")
  parser.add_option("--target", help="Debug or Release")
  parser.add_option("-t", "--test", action="append", default=[],
                    help="which test to run, supports test:gtest_filter format "
                         "as well.")
  parser.add_option("--baseline", action="store_true", default=False,
                    help="generate baseline data instead of validating")
  parser.add_option("--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
  parser.add_option("--gtest_shuffle", action="store_true", default=False,
                    help="Randomize tests' orders on every iteration.")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
                    help="specify a valgrind tool to run the tests under")
  parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
                    help="specify custom flags for the selected valgrind tool")
  parser.add_option("--keep_logs", action="store_true", default=False,
                    help="store memory tool logs in the <tool>.logs directory "
                         "instead of /tmp.\nThis can be useful for tool "
                         "developers/maintainers.\nPlease note that the <tool>"
                         ".logs directory will be clobbered on tool startup.")
  parser.add_option("-n", "--num_tests", type="int",
                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                    help="for layout tests: # of subtests per run.  0 for all.")
  # TODO(thestig) Remove this if we can.
  parser.add_option("--gtest_color", dest="gtest_color", default="no",
                    help="dummy compatibility flag for sharding_supervisor.")
  parser.add_option("--brave-new-test-launcher", action="store_true",
                    help="run the tests with --brave-new-test-launcher")
  parser.add_option("--test-launcher-bot-mode", action="store_true",
                    help="run the tests with --test-launcher-bot-mode")

  options, args = parser.parse_args()

  # Bake target into build_dir.
  if options.target and options.build_dir:
    assert (options.target !=
            os.path.basename(os.path.dirname(options.build_dir)))
    options.build_dir = os.path.join(os.path.abspath(options.build_dir),
                                     options.target)

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  # Run each requested test in turn; stop at the first failure.
  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    if ret: return ret
  return 0
# Script entry point.
if __name__ == "__main__":
  sys.exit(_main())