Explicitly add python-numpy dependency to install-build-deps.
[chromium-blink-merge.git] / tools / valgrind / chrome_tests.py
blob677cb63cd14e508c0135b36df62bd4242d4043a7
1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 ''' Runs various chrome tests through valgrind_test.py.'''
8 import glob
9 import logging
10 import multiprocessing
11 import optparse
12 import os
13 import stat
14 import subprocess
15 import sys
17 import logging_utils
18 import path_utils
20 import common
21 import valgrind_test
# Errors raised while resolving tests, build directories, and binaries.

class TestNotFound(Exception):
  """The requested test name is not in ChromeTests._test_list."""


class MultipleGTestFiltersSpecified(Exception):
  """Both --gtest_filter and a test:filter spec were supplied."""


class BuildDirNotFound(Exception):
  """No build directory was given and none could be auto-detected."""


class BuildDirAmbiguous(Exception):
  """More than one candidate build directory was auto-detected."""


class ExecutableNotFound(Exception):
  """The requested test binary does not exist in the build directory."""


class BadBinary(Exception):
  """The binary is instrumented in a way this tool cannot run."""
class ChromeTests:
  """Configures and runs one Chromium test suite under a memory tool."""

  # Tools slow enough that the extra ".gtest.txt" exclude files apply
  # (see _AppendGtestFilter).
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  # Default number of layout tests per chunk (see TestLayout).
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
39 def __init__(self, options, args, test):
40 if ':' in test:
41 (self._test, self._gtest_filter) = test.split(':', 1)
42 else:
43 self._test = test
44 self._gtest_filter = options.gtest_filter
46 if self._test not in self._test_list:
47 raise TestNotFound("Unknown test: %s" % test)
49 if options.gtest_filter and options.gtest_filter != self._gtest_filter:
50 raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
51 "and --test %s" % test)
53 self._options = options
54 self._args = args
56 script_dir = path_utils.ScriptDir()
57 # Compute the top of the tree (the "source dir") from the script dir (where
58 # this script lives). We assume that the script dir is in tools/valgrind/
59 # relative to the top of the tree.
60 self._source_dir = os.path.dirname(os.path.dirname(script_dir))
61 # since this path is used for string matching, make sure it's always
62 # an absolute Unix-style path
63 self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
64 valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
65 self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
67 if not self._options.build_dir:
68 dirs = [
69 os.path.join(self._source_dir, "xcodebuild", "Debug"),
70 os.path.join(self._source_dir, "out", "Debug"),
71 os.path.join(self._source_dir, "build", "Debug"),
73 build_dir = [d for d in dirs if os.path.isdir(d)]
74 if len(build_dir) > 1:
75 raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
76 "%s\nPlease specify just one "
77 "using --build-dir" % ", ".join(build_dir))
78 elif build_dir:
79 self._options.build_dir = build_dir[0]
80 else:
81 self._options.build_dir = None
83 if self._options.build_dir:
84 build_dir = os.path.abspath(self._options.build_dir)
85 self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
87 def _EnsureBuildDirFound(self):
88 if not self._options.build_dir:
89 raise BuildDirNotFound("Oops, couldn't find a build dir, please "
90 "specify it manually using --build-dir")
92 def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
93 '''Generates the default command array that most tests will use.'''
94 if exe and common.IsWindows():
95 exe += '.exe'
97 cmd = list(self._command_preamble)
99 # Find all suppressions matching the following pattern:
100 # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
101 # and list them with --suppressions= prefix.
102 script_dir = path_utils.ScriptDir()
103 tool_name = tool.ToolName();
104 suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
105 if os.path.exists(suppression_file):
106 cmd.append("--suppressions=%s" % suppression_file)
107 # Platform-specific suppression
108 for platform in common.PlatformNames():
109 platform_suppression_file = \
110 os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
111 if os.path.exists(platform_suppression_file):
112 cmd.append("--suppressions=%s" % platform_suppression_file)
114 if self._options.valgrind_tool_flags:
115 cmd += self._options.valgrind_tool_flags.split(" ")
116 if self._options.keep_logs:
117 cmd += ["--keep_logs"]
118 if valgrind_test_args != None:
119 for arg in valgrind_test_args:
120 cmd.append(arg)
121 if exe:
122 self._EnsureBuildDirFound()
123 exe_path = os.path.join(self._options.build_dir, exe)
124 if not os.path.exists(exe_path):
125 raise ExecutableNotFound("Couldn't find '%s'" % exe_path)
127 # Make sure we don't try to test ASan-built binaries
128 # with other dynamic instrumentation-based tools.
129 # TODO(timurrrr): also check TSan and MSan?
130 # `nm` might not be available, so use try-except.
131 try:
132 # Do not perform this check on OS X, as 'nm' on 10.6 can't handle
133 # binaries built with Clang 3.5+.
134 if not common.IsMac():
135 nm_output = subprocess.check_output(["nm", exe_path])
136 if nm_output.find("__asan_init") != -1:
137 raise BadBinary("You're trying to run an executable instrumented "
138 "with AddressSanitizer under %s. Please provide "
139 "an uninstrumented executable." % tool_name)
140 except OSError:
141 pass
143 cmd.append(exe_path)
144 # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
145 # so we can find the slowpokes.
146 cmd.append("--gtest_print_time")
147 # Built-in test launcher for gtest-based executables runs tests using
148 # multiple process by default. Force the single-process mode back.
149 cmd.append("--single-process-tests")
150 if self._options.gtest_repeat:
151 cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
152 if self._options.gtest_shuffle:
153 cmd.append("--gtest_shuffle")
154 if self._options.gtest_break_on_failure:
155 cmd.append("--gtest_break_on_failure")
156 if self._options.brave_new_test_launcher:
157 cmd.append("--brave-new-test-launcher")
158 if self._options.test_launcher_bot_mode:
159 cmd.append("--test-launcher-bot-mode")
160 return cmd
162 def Run(self):
163 ''' Runs the test specified by command-line argument --test '''
164 logging.info("running test %s" % (self._test))
165 return self._test_list[self._test](self)
167 def _AppendGtestFilter(self, tool, name, cmd):
168 '''Append an appropriate --gtest_filter flag to the googletest binary
169 invocation.
170 If the user passed his own filter mentioning only one test, just use it.
171 Othewise, filter out tests listed in the appropriate gtest_exclude files.
173 if (self._gtest_filter and
174 ":" not in self._gtest_filter and
175 "?" not in self._gtest_filter and
176 "*" not in self._gtest_filter):
177 cmd.append("--gtest_filter=%s" % self._gtest_filter)
178 return
180 filters = []
181 gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
183 gtest_filter_files = [
184 os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
185 # Use ".gtest.txt" files only for slow tools, as they now contain
186 # Valgrind- and Dr.Memory-specific filters.
187 # TODO(glider): rename the files to ".gtest_slow.txt"
188 if tool.ToolName() in ChromeTests.SLOW_TOOLS:
189 gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
190 for platform_suffix in common.PlatformNames():
191 gtest_filter_files += [
192 os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
193 os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
194 (tool.ToolName(), platform_suffix))]
195 logging.info("Reading gtest exclude filter files:")
196 for filename in gtest_filter_files:
197 # strip the leading absolute path (may be very long on the bot)
198 # and the following / or \.
199 readable_filename = filename.replace("\\", "/") # '\' on Windows
200 readable_filename = readable_filename.replace(self._source_dir, "")[1:]
201 if not os.path.exists(filename):
202 logging.info(" \"%s\" - not found" % readable_filename)
203 continue
204 logging.info(" \"%s\" - OK" % readable_filename)
205 f = open(filename, 'r')
206 for line in f.readlines():
207 if line.startswith("#") or line.startswith("//") or line.isspace():
208 continue
209 line = line.rstrip()
210 test_prefixes = ["FLAKY", "FAILS"]
211 for p in test_prefixes:
212 # Strip prefixes from the test names.
213 line = line.replace(".%s_" % p, ".")
214 # Exclude the original test name.
215 filters.append(line)
216 if line[-2:] != ".*":
217 # List all possible prefixes if line doesn't end with ".*".
218 for p in test_prefixes:
219 filters.append(line.replace(".", ".%s_" % p))
220 # Get rid of duplicates.
221 filters = set(filters)
222 gtest_filter = self._gtest_filter
223 if len(filters):
224 if gtest_filter:
225 gtest_filter += ":"
226 if gtest_filter.find("-") < 0:
227 gtest_filter += "-"
228 else:
229 gtest_filter = "-"
230 gtest_filter += ":".join(filters)
231 if gtest_filter:
232 cmd.append("--gtest_filter=%s" % gtest_filter)
234 @staticmethod
235 def ShowTests():
236 test_to_names = {}
237 for name, test_function in ChromeTests._test_list.iteritems():
238 test_to_names.setdefault(test_function, []).append(name)
240 name_to_aliases = {}
241 for names in test_to_names.itervalues():
242 names.sort(key=lambda name: len(name))
243 name_to_aliases[names[0]] = names[1:]
245 print
246 print "Available tests:"
247 print "----------------"
248 for name, aliases in sorted(name_to_aliases.iteritems()):
249 if aliases:
250 print " {} (aka {})".format(name, ', '.join(aliases))
251 else:
252 print " {}".format(name)
254 def SetupLdPath(self, requires_build_dir):
255 if requires_build_dir:
256 self._EnsureBuildDirFound()
257 elif not self._options.build_dir:
258 return
260 # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
261 if (os.getenv("LD_LIBRARY_PATH")):
262 os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
263 self._options.build_dir))
264 else:
265 os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
267 def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
268 tool = valgrind_test.CreateTool(self._options.valgrind_tool)
269 cmd = self._DefaultCommand(tool, name, valgrind_test_args)
270 self._AppendGtestFilter(tool, name, cmd)
271 cmd.extend(['--test-tiny-timeout=1000'])
272 if cmd_args:
273 cmd.extend(cmd_args)
275 self.SetupLdPath(True)
276 return tool.Run(cmd, module)
278 def RunCmdLine(self):
279 tool = valgrind_test.CreateTool(self._options.valgrind_tool)
280 cmd = self._DefaultCommand(tool, None, self._args)
281 self.SetupLdPath(False)
282 return tool.Run(cmd, None)
284 def TestAccessibility(self):
285 return self.SimpleTest("accessibility", "accessibility_unittests")
287 def TestAddressInput(self):
288 return self.SimpleTest("addressinput", "libaddressinput_unittests")
290 def TestAngle(self):
291 return self.SimpleTest("angle", "angle_unittests")
293 def TestAppList(self):
294 return self.SimpleTest("app_list", "app_list_unittests")
296 def TestAsh(self):
297 return self.SimpleTest("ash", "ash_unittests")
299 def TestAshShell(self):
300 return self.SimpleTest("ash_shelf", "ash_shell_unittests")
302 def TestAura(self):
303 return self.SimpleTest("aura", "aura_unittests")
305 def TestBase(self):
306 return self.SimpleTest("base", "base_unittests")
308 def TestBlinkHeap(self):
309 return self.SimpleTest("blink_heap", "blink_heap_unittests")
311 def TestBlinkPlatform(self):
312 return self.SimpleTest("blink_platform", "blink_platform_unittests")
314 def TestCacheInvalidation(self):
315 return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")
317 def TestCast(self):
318 return self.SimpleTest("chrome", "cast_unittests")
320 def TestCC(self):
321 return self.SimpleTest("cc", "cc_unittests")
323 def TestChromeApp(self):
324 return self.SimpleTest("chrome_app", "chrome_app_unittests")
326 def TestChromeElf(self):
327 return self.SimpleTest("chrome_elf", "chrome_elf_unittests")
329 def TestChromeDriver(self):
330 return self.SimpleTest("chromedriver", "chromedriver_unittests")
332 def TestChromeOS(self):
333 return self.SimpleTest("chromeos", "chromeos_unittests")
335 def TestCloudPrint(self):
336 return self.SimpleTest("cloud_print", "cloud_print_unittests")
338 def TestComponents(self):
339 return self.SimpleTest("components", "components_unittests")
341 def TestCompositor(self):
342 return self.SimpleTest("compositor", "compositor_unittests")
344 def TestContent(self):
345 return self.SimpleTest("content", "content_unittests")
347 def TestCourgette(self):
348 return self.SimpleTest("courgette", "courgette_unittests")
350 def TestCrypto(self):
351 return self.SimpleTest("crypto", "crypto_unittests")
353 def TestDevice(self):
354 return self.SimpleTest("device", "device_unittests")
356 def TestDisplay(self):
357 return self.SimpleTest("display", "display_unittests")
359 def TestEvents(self):
360 return self.SimpleTest("events", "events_unittests")
362 def TestExtensions(self):
363 return self.SimpleTest("extensions", "extensions_unittests")
365 def TestFFmpeg(self):
366 return self.SimpleTest("chrome", "ffmpeg_unittests")
368 def TestFFmpegRegressions(self):
369 return self.SimpleTest("chrome", "ffmpeg_regression_tests")
371 def TestGCM(self):
372 return self.SimpleTest("gcm", "gcm_unit_tests")
374 def TestGfx(self):
375 return self.SimpleTest("gfx", "gfx_unittests")
377 def TestGin(self):
378 return self.SimpleTest("gin", "gin_unittests")
380 def TestGoogleApis(self):
381 return self.SimpleTest("google_apis", "google_apis_unittests")
383 def TestGPU(self):
384 return self.SimpleTest("gpu", "gpu_unittests")
386 def TestIpc(self):
387 return self.SimpleTest("ipc", "ipc_tests",
388 valgrind_test_args=["--trace_children"])
390 def TestInstallerUtil(self):
391 return self.SimpleTest("installer_util", "installer_util_unittests")
393 def TestJingle(self):
394 return self.SimpleTest("chrome", "jingle_unittests")
396 def TestKeyboard(self):
397 return self.SimpleTest("keyboard", "keyboard_unittests")
399 def TestMedia(self):
400 return self.SimpleTest("chrome", "media_unittests")
402 def TestMessageCenter(self):
403 return self.SimpleTest("message_center", "message_center_unittests")
405 def TestMojoCommon(self):
406 return self.SimpleTest("mojo_common", "mojo_common_unittests")
408 def TestMojoPublicBindings(self):
409 return self.SimpleTest("mojo_public_bindings",
410 "mojo_public_bindings_unittests")
412 def TestMojoPublicEnv(self):
413 return self.SimpleTest("mojo_public_env",
414 "mojo_public_environment_unittests")
416 def TestMojoPublicSystem(self):
417 return self.SimpleTest("mojo_public_system",
418 "mojo_public_system_unittests")
420 def TestMojoPublicSysPerf(self):
421 return self.SimpleTest("mojo_public_sysperf",
422 "mojo_public_system_perftests")
424 def TestMojoPublicUtility(self):
425 return self.SimpleTest("mojo_public_utility",
426 "mojo_public_utility_unittests")
428 def TestMojoSystem(self):
429 return self.SimpleTest("mojo_system", "mojo_system_unittests")
431 def TestNet(self):
432 return self.SimpleTest("net", "net_unittests")
434 def TestNetPerf(self):
435 return self.SimpleTest("net", "net_perftests")
437 def TestPhoneNumber(self):
438 return self.SimpleTest("phonenumber", "libphonenumber_unittests")
440 def TestPPAPI(self):
441 return self.SimpleTest("chrome", "ppapi_unittests")
443 def TestPrinting(self):
444 return self.SimpleTest("chrome", "printing_unittests")
446 def TestRemoting(self):
447 return self.SimpleTest("chrome", "remoting_unittests",
448 cmd_args=[
449 "--ui-test-action-timeout=60000",
450 "--ui-test-action-max-timeout=150000"])
452 def TestSql(self):
453 return self.SimpleTest("chrome", "sql_unittests")
455 def TestSync(self):
456 return self.SimpleTest("chrome", "sync_unit_tests")
458 def TestLinuxSandbox(self):
459 return self.SimpleTest("sandbox", "sandbox_linux_unittests")
461 def TestUnit(self):
462 # http://crbug.com/51716
463 # Disabling all unit tests
464 # Problems reappeared after r119922
465 if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
466 logging.warning("unit_tests are disabled for memcheck on MacOS.")
467 return 0;
468 return self.SimpleTest("chrome", "unit_tests")
470 def TestUIBaseUnit(self):
471 return self.SimpleTest("chrome", "ui_base_unittests")
473 def TestUIChromeOS(self):
474 return self.SimpleTest("chrome", "ui_chromeos_unittests")
476 def TestURL(self):
477 return self.SimpleTest("chrome", "url_unittests")
479 def TestViews(self):
480 return self.SimpleTest("views", "views_unittests")
483 # Valgrind timeouts are in seconds.
484 UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
485 # UI test timeouts are in milliseconds.
486 UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
487 "--ui-test-action-max-timeout=150000",
488 "--no-sandbox"]
490 # TODO(thestig) fine-tune these values.
491 # Valgrind timeouts are in seconds.
492 BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
493 # Browser test timeouts are in milliseconds.
494 BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
495 "--ui-test-action-max-timeout=800000",
496 "--no-sandbox"]
498 def TestBrowser(self):
499 return self.SimpleTest("chrome", "browser_tests",
500 valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
501 cmd_args=self.BROWSER_TEST_ARGS)
503 def TestContentBrowser(self):
504 return self.SimpleTest("content", "content_browsertests",
505 valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
506 cmd_args=self.BROWSER_TEST_ARGS)
508 def TestInteractiveUI(self):
509 return self.SimpleTest("chrome", "interactive_ui_tests",
510 valgrind_test_args=self.UI_VALGRIND_ARGS,
511 cmd_args=self.UI_TEST_ARGS)
513 def TestSafeBrowsing(self):
514 return self.SimpleTest("chrome", "safe_browsing_tests",
515 valgrind_test_args=self.UI_VALGRIND_ARGS,
516 cmd_args=(["--ui-test-action-max-timeout=450000"]))
518 def TestSyncIntegration(self):
519 return self.SimpleTest("chrome", "sync_integration_tests",
520 valgrind_test_args=self.UI_VALGRIND_ARGS,
521 cmd_args=(["--ui-test-action-max-timeout=450000"]))
523 def TestLayoutChunk(self, chunk_num, chunk_size):
524 # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
525 # list of tests. Wrap around to beginning of list at end.
526 # If chunk_size is zero, run all tests in the list once.
527 # If a text file is given as argument, it is used as the list of tests.
528 assert((chunk_size == 0) != (len(self._args) == 0))
529 # Build the ginormous commandline in 'cmd'.
530 # It's going to be roughly
531 # python valgrind_test.py ...
532 # but we'll use the --indirect flag to valgrind_test.py
533 # to avoid valgrinding python.
534 # Start by building the valgrind_test.py commandline.
535 tool = valgrind_test.CreateTool(self._options.valgrind_tool)
536 cmd = self._DefaultCommand(tool)
537 cmd.append("--trace_children")
538 cmd.append("--indirect_webkit_layout")
539 cmd.append("--ignore_exit_code")
540 # Now build script_cmd, the run-webkits-tests commandline.
541 # Store each chunk in its own directory so that we can find the data later
542 chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
543 out_dir = os.path.join(path_utils.ScriptDir(), "latest")
544 out_dir = os.path.join(out_dir, chunk_dir)
545 if os.path.exists(out_dir):
546 old_files = glob.glob(os.path.join(out_dir, "*.txt"))
547 for f in old_files:
548 os.remove(f)
549 else:
550 os.makedirs(out_dir)
551 script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
552 "Scripts", "run-webkit-tests")
553 # http://crbug.com/260627: After the switch to content_shell from DRT, each
554 # test now brings up 3 processes. Under Valgrind, they become memory bound
555 # and can eventually OOM if we don't reduce the total count.
556 # It'd be nice if content_shell automatically throttled the startup of new
557 # tests if we're low on memory.
558 jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
559 script_cmd = ["python", script, "-v",
560 # run a separate DumpRenderTree for each test
561 "--batch-size=1",
562 "--fully-parallel",
563 "--child-processes=%d" % jobs,
564 "--time-out-ms=800000",
565 "--no-retry-failures", # retrying takes too much time
566 # http://crbug.com/176908: Don't launch a browser when done.
567 "--no-show-results",
568 "--nocheck-sys-deps"]
569 # Pass build mode to run-webkit-tests. We aren't passed it directly,
570 # so parse it out of build_dir. run-webkit-tests can only handle
571 # the two values "Release" and "Debug".
572 # TODO(Hercules): unify how all our scripts pass around build mode
573 # (--mode / --target / --build-dir / --debug)
574 if self._options.build_dir:
575 build_root, mode = os.path.split(self._options.build_dir)
576 script_cmd.extend(["--build-directory", build_root, "--target", mode])
577 if (chunk_size > 0):
578 script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
579 if len(self._args):
580 # if the arg is a txt file, then treat it as a list of tests
581 if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
582 script_cmd.append("--test-list=%s" % self._args[0])
583 else:
584 script_cmd.extend(self._args)
585 self._AppendGtestFilter(tool, "layout", script_cmd)
586 # Now run script_cmd with the wrapper in cmd
587 cmd.extend(["--"])
588 cmd.extend(script_cmd)
590 # Layout tests often times fail quickly, but the buildbot remains green.
591 # Detect this situation when running with the default chunk size.
592 if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
593 min_runtime_in_seconds=120
594 else:
595 min_runtime_in_seconds=0
596 ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
597 return ret
600 def TestLayout(self):
601 # A "chunk file" is maintained in the local directory so that each test
602 # runs a slice of the layout tests of size chunk_size that increments with
603 # each run. Since tests can be added and removed from the layout tests at
604 # any time, this is not going to give exact coverage, but it will allow us
605 # to continuously run small slices of the layout tests under valgrind rather
606 # than having to run all of them in one shot.
607 chunk_size = self._options.num_tests
608 if chunk_size == 0 or len(self._args):
609 return self.TestLayoutChunk(0, 0)
610 chunk_num = 0
611 chunk_file = os.path.join("valgrind_layout_chunk.txt")
612 logging.info("Reading state from " + chunk_file)
613 try:
614 f = open(chunk_file)
615 if f:
616 chunk_str = f.read()
617 if len(chunk_str):
618 chunk_num = int(chunk_str)
619 # This should be enough so that we have a couple of complete runs
620 # of test data stored in the archive (although note that when we loop
621 # that we almost guaranteed won't be at the end of the test list)
622 if chunk_num > 10000:
623 chunk_num = 0
624 f.close()
625 except IOError, (errno, strerror):
626 logging.error("error reading from file %s (%d, %s)" % (chunk_file,
627 errno, strerror))
628 # Save the new chunk size before running the tests. Otherwise if a
629 # particular chunk hangs the bot, the chunk number will never get
630 # incremented and the bot will be wedged.
631 logging.info("Saving state to " + chunk_file)
632 try:
633 f = open(chunk_file, "w")
634 chunk_num += 1
635 f.write("%d" % chunk_num)
636 f.close()
637 except IOError, (errno, strerror):
638 logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
639 strerror))
640 # Since we're running small chunks of the layout tests, it's important to
641 # mark the ones that have errors in them. These won't be visible in the
642 # summary list for long, but will be useful for someone reviewing this bot.
643 return self.TestLayoutChunk(chunk_num, chunk_size)
645 # The known list of tests.
646 # Recognise the original abbreviations as well as full executable names.
647 _test_list = {
648 "cmdline" : RunCmdLine,
649 "addressinput": TestAddressInput,
650 "libaddressinput_unittests": TestAddressInput,
651 "accessibility": TestAccessibility,
652 "angle": TestAngle, "angle_unittests": TestAngle,
653 "app_list": TestAppList, "app_list_unittests": TestAppList,
654 "ash": TestAsh, "ash_unittests": TestAsh,
655 "ash_shell": TestAshShell, "ash_shell_unittests": TestAshShell,
656 "aura": TestAura, "aura_unittests": TestAura,
657 "base": TestBase, "base_unittests": TestBase,
658 "blink_heap": TestBlinkHeap,
659 "blink_platform": TestBlinkPlatform,
660 "browser": TestBrowser, "browser_tests": TestBrowser,
661 "cacheinvalidation": TestCacheInvalidation,
662 "cacheinvalidation_unittests": TestCacheInvalidation,
663 "cast": TestCast, "cast_unittests": TestCast,
664 "cc": TestCC, "cc_unittests": TestCC,
665 "chrome_app": TestChromeApp,
666 "chrome_elf": TestChromeElf,
667 "chromedriver": TestChromeDriver,
668 "chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
669 "cloud_print": TestCloudPrint,
670 "cloud_print_unittests": TestCloudPrint,
671 "components": TestComponents,"components_unittests": TestComponents,
672 "compositor": TestCompositor,"compositor_unittests": TestCompositor,
673 "content": TestContent, "content_unittests": TestContent,
674 "content_browsertests": TestContentBrowser,
675 "courgette": TestCourgette, "courgette_unittests": TestCourgette,
676 "crypto": TestCrypto, "crypto_unittests": TestCrypto,
677 "device": TestDevice, "device_unittests": TestDevice,
678 "display": TestDisplay, "display_unittests": TestDisplay,
679 "events": TestEvents, "events_unittests": TestEvents,
680 "extensions": TestExtensions, "extensions_unittests": TestExtensions,
681 "ffmpeg": TestFFmpeg, "ffmpeg_unittests": TestFFmpeg,
682 "ffmpeg_regression_tests": TestFFmpegRegressions,
683 "gcm": TestGCM, "gcm_unit_tests": TestGCM,
684 "gin": TestGin, "gin_unittests": TestGin,
685 "gfx": TestGfx, "gfx_unittests": TestGfx,
686 "google_apis": TestGoogleApis,
687 "gpu": TestGPU, "gpu_unittests": TestGPU,
688 "ipc": TestIpc, "ipc_tests": TestIpc,
689 "installer_util": TestInstallerUtil,
690 "interactive_ui": TestInteractiveUI,
691 "jingle": TestJingle, "jingle_unittests": TestJingle,
692 "keyboard": TestKeyboard, "keyboard_unittests": TestKeyboard,
693 "layout": TestLayout, "layout_tests": TestLayout,
694 "media": TestMedia, "media_unittests": TestMedia,
695 "message_center": TestMessageCenter,
696 "message_center_unittests" : TestMessageCenter,
697 "mojo_common": TestMojoCommon,
698 "mojo_system": TestMojoSystem,
699 "mojo_public_system": TestMojoPublicSystem,
700 "mojo_public_utility": TestMojoPublicUtility,
701 "mojo_public_bindings": TestMojoPublicBindings,
702 "mojo_public_env": TestMojoPublicEnv,
703 "mojo_public_sysperf": TestMojoPublicSysPerf,
704 "net": TestNet, "net_unittests": TestNet,
705 "net_perf": TestNetPerf, "net_perftests": TestNetPerf,
706 "phonenumber": TestPhoneNumber,
707 "libphonenumber_unittests": TestPhoneNumber,
708 "ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
709 "printing": TestPrinting, "printing_unittests": TestPrinting,
710 "remoting": TestRemoting, "remoting_unittests": TestRemoting,
711 "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
712 "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
713 "sql": TestSql, "sql_unittests": TestSql,
714 "sync": TestSync, "sync_unit_tests": TestSync,
715 "sync_integration_tests": TestSyncIntegration,
716 "sync_integration": TestSyncIntegration,
717 "ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
718 "ui_chromeos": TestUIChromeOS, "ui_chromeos_unittests": TestUIChromeOS,
719 "unit": TestUnit, "unit_tests": TestUnit,
720 "url": TestURL, "url_unittests": TestURL,
721 "views": TestViews, "views_unittests": TestViews,
722 "webkit": TestLayout,
726 def _main():
727 parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
728 "[-t <test> ...]")
730 parser.add_option("--help-tests", dest="help_tests", action="store_true",
731 default=False, help="List all available tests")
732 parser.add_option("-b", "--build-dir",
733 help="the location of the compiler output")
734 parser.add_option("--target", help="Debug or Release")
735 parser.add_option("-t", "--test", action="append", default=[],
736 help="which test to run, supports test:gtest_filter format "
737 "as well.")
738 parser.add_option("--baseline", action="store_true", default=False,
739 help="generate baseline data instead of validating")
740 parser.add_option("--gtest_filter",
741 help="additional arguments to --gtest_filter")
742 parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
743 parser.add_option("--gtest_shuffle", action="store_true", default=False,
744 help="Randomize tests' orders on every iteration.")
745 parser.add_option("--gtest_break_on_failure", action="store_true",
746 default=False,
747 help="Drop in to debugger on assertion failure. Also "
748 "useful for forcing tests to exit with a stack dump "
749 "on the first assertion failure when running with "
750 "--gtest_repeat=-1")
751 parser.add_option("-v", "--verbose", action="store_true", default=False,
752 help="verbose output - enable debug log messages")
753 parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
754 help="specify a valgrind tool to run the tests under")
755 parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
756 help="specify custom flags for the selected valgrind tool")
757 parser.add_option("--keep_logs", action="store_true", default=False,
758 help="store memory tool logs in the <tool>.logs directory "
759 "instead of /tmp.\nThis can be useful for tool "
760 "developers/maintainers.\nPlease note that the <tool>"
761 ".logs directory will be clobbered on tool startup.")
762 parser.add_option("-n", "--num_tests", type="int",
763 default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
764 help="for layout tests: # of subtests per run. 0 for all.")
765 # TODO(thestig) Remove this if we can.
766 parser.add_option("--gtest_color", dest="gtest_color", default="no",
767 help="dummy compatibility flag for sharding_supervisor.")
768 parser.add_option("--brave-new-test-launcher", action="store_true",
769 help="run the tests with --brave-new-test-launcher")
770 parser.add_option("--test-launcher-bot-mode", action="store_true",
771 help="run the tests with --test-launcher-bot-mode")
773 options, args = parser.parse_args()
775 # Bake target into build_dir.
776 if options.target and options.build_dir:
777 assert (options.target !=
778 os.path.basename(os.path.dirname(options.build_dir)))
779 options.build_dir = os.path.join(os.path.abspath(options.build_dir),
780 options.target)
782 if options.verbose:
783 logging_utils.config_root(logging.DEBUG)
784 else:
785 logging_utils.config_root()
787 if options.help_tests:
788 ChromeTests.ShowTests()
789 return 0
791 if not options.test:
792 parser.error("--test not specified")
794 if len(options.test) != 1 and options.gtest_filter:
795 parser.error("--gtest_filter and multiple tests don't make sense together")
797 for t in options.test:
798 tests = ChromeTests(options, args, t)
799 ret = tests.Run()
800 if ret: return ret
801 return 0
804 if __name__ == "__main__":
805 sys.exit(_main())