# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

''' Runs various chrome tests through valgrind_test.py.'''

import glob
import logging
import multiprocessing
import optparse
import os
import subprocess
import sys

import logging_utils
import path_utils

import common
import valgrind_test
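
# Example invocations (illustrative; the flags are defined by the option
# parser below):
#   ./chrome_tests.py -b out/Debug -t base_unittests
#   ./chrome_tests.py -t unit:FooTest.* --tool drmemory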

class TestNotFound(Exception): pass

class MultipleGTestFiltersSpecified(Exception): pass

class BuildDirNotFound(Exception): pass

class BuildDirAmbiguous(Exception): pass

class ExecutableNotFound(Exception): pass

class BadBinary(Exception): pass


class ChromeTests:
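  # SLOW_TOOLS names the tools for which the generic ".gtest.txt" exclusion
  # files apply in addition to the tool-specific ones (see _AppendGtestFilter).
  # LAYOUT_TESTS_DEFAULT_CHUNK_SIZE is the default for --num_tests and is the
  # chunk size at which TestLayoutChunk enforces a minimum runtime.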
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300

  def __init__(self, options, args, test):
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives).  We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Since this path is used for string matching, make sure it's always
    # an absolute Unix-style path.
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]

    if not self._options.build_dir:
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build-dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]

  def _EnsureBuildDirFound(self):
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build-dir")
  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppressions.
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args != None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      exe_path = os.path.join(self._options.build_dir, exe)
      if not os.path.exists(exe_path):
        raise ExecutableNotFound("Couldn't find '%s'" % exe_path)

      # Make sure we don't try to test ASan-built binaries
      # with other dynamic instrumentation-based tools.
      # TODO(timurrrr): also check TSan and MSan?
      # `nm` might not be available, so use try-except.
      try:
        # Do not perform this check on OS X, as 'nm' on 10.6 can't handle
        # binaries built with Clang 3.5+.
        if not common.IsMac():
          nm_output = subprocess.check_output(["nm", exe_path])
          if nm_output.find("__asan_init") != -1:
            raise BadBinary("You're trying to run an executable instrumented "
                            "with AddressSanitizer under %s. Please provide "
                            "an uninstrumented executable." % tool_name)
      except OSError:
        pass

      cmd.append(exe_path)

      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
      # The built-in test launcher for gtest-based executables runs tests in
      # multiple processes by default; force single-process mode back on.
      cmd.append("--single-process-tests")
      if self._options.gtest_repeat:
        cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
      if self._options.gtest_shuffle:
        cmd.append("--gtest_shuffle")
      if self._options.gtest_break_on_failure:
        cmd.append("--gtest_break_on_failure")
      if self._options.brave_new_test_launcher:
        cmd.append("--brave-new-test-launcher")
      if self._options.test_launcher_bot_mode:
        cmd.append("--test-launcher-bot-mode")
    return cmd

  def Run(self):
    ''' Runs the test specified by command-line argument --test '''
    logging.info("running test %s" % (self._test))
    return self._test_list[self._test](self)
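
  # Exclusion lists live in tools/valgrind/gtest_exclude/ and are looked up by
  # test name, tool and platform; for example, base_unittests under memcheck
  # on Linux would read (illustrative):
  #   base_unittests.gtest-memcheck.txt
  #   base_unittests.gtest.txt              (slow tools only)
  #   base_unittests.gtest_linux.txt
  #   base_unittests.gtest-memcheck_linux.txt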
  def _AppendGtestFilter(self, tool, name, cmd):
    '''Append an appropriate --gtest_filter flag to the googletest binary
       invocation.

       If the user passed his own filter mentioning only one test, just use it.
       Otherwise, filter out tests listed in the appropriate gtest_exclude
       files.
    '''
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # Strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info("  \"%s\" - not found" % readable_filename)
        continue
      logging.info("  \"%s\" - OK" % readable_filename)
      f = open(filename, 'r')
      for line in f.readlines():
        if line.startswith("#") or line.startswith("//") or line.isspace():
          continue
        line = line.rstrip()
        test_prefixes = ["FLAKY", "FAILS"]
        for p in test_prefixes:
          # Strip prefixes from the test names.
          line = line.replace(".%s_" % p, ".")
        # Exclude the original test name.
        filters.append(line)
        if line[-2:] != ".*":
          # List all possible prefixes if line doesn't end with ".*".
          for p in test_prefixes:
            filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
    if len(filters):
      if gtest_filter:
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)

    cmd.append("--gtest_filter=%s" % gtest_filter)

  @staticmethod
  def ShowTests():
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=lambda name: len(name))
      name_to_aliases[names[0]] = names[1:]

    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print "  {} (aka {})".format(name, ', '.join(aliases))
      else:
        print "  {}".format(name)

  def SetupLdPath(self, requires_build_dir):
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return

    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
    if (os.getenv("LD_LIBRARY_PATH")):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
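
  # SimpleTest is the shared driver for most suites: it builds the
  # valgrind_test.py command, appends the gtest filter plus any suite-specific
  # cmd_args, and hands the result to the tool, e.g. (illustrative)
  #   self.SimpleTest("base", "base_unittests")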
  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)

    self.SetupLdPath(True)
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)

  def TestAccessibility(self):
    return self.SimpleTest("accessibility", "accessibility_unittests")

  def TestAddressInput(self):
    return self.SimpleTest("addressinput", "libaddressinput_unittests")

  def TestAngle(self):
    return self.SimpleTest("angle", "angle_unittests")

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAshShell(self):
    return self.SimpleTest("ash_shelf", "ash_shell_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestBlinkHeap(self):
    return self.SimpleTest("blink_heap", "blink_heap_unittests")

  def TestBlinkPlatform(self):
    return self.SimpleTest("blink_platform", "blink_platform_unittests")

  def TestCacheInvalidation(self):
    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")

  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")

  def TestCC(self):
    return self.SimpleTest("cc", "cc_unittests")

  def TestChromeApp(self):
    return self.SimpleTest("chrome_app", "chrome_app_unittests")

  def TestChromeElf(self):
    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")

  def TestChromeDriver(self):
    return self.SimpleTest("chromedriver", "chromedriver_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestCloudPrint(self):
    return self.SimpleTest("cloud_print", "cloud_print_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestDisplay(self):
    return self.SimpleTest("display", "display_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestExtensions(self):
    return self.SimpleTest("extensions", "extensions_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")

  def TestGfx(self):
    return self.SimpleTest("gfx", "gfx_unittests")

  def TestGin(self):
    return self.SimpleTest("gin", "gin_unittests")

  def TestGoogleApis(self):
    return self.SimpleTest("google_apis", "google_apis_unittests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestInstallerUtil(self):
    return self.SimpleTest("installer_util", "installer_util_unittests")

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestKeyboard(self):
    return self.SimpleTest("keyboard", "keyboard_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestMojoCommon(self):
    return self.SimpleTest("mojo_common", "mojo_common_unittests")

  def TestMojoPublicBindings(self):
    return self.SimpleTest("mojo_public_bindings",
                           "mojo_public_bindings_unittests")

  def TestMojoPublicEnv(self):
    return self.SimpleTest("mojo_public_env",
                           "mojo_public_environment_unittests")

  def TestMojoPublicSystem(self):
    return self.SimpleTest("mojo_public_system",
                           "mojo_public_system_unittests")

  def TestMojoPublicSysPerf(self):
    return self.SimpleTest("mojo_public_sysperf",
                           "mojo_public_system_perftests")

  def TestMojoPublicUtility(self):
    return self.SimpleTest("mojo_public_utility",
                           "mojo_public_utility_unittests")

  def TestMojoSystem(self):
    return self.SimpleTest("mojo_system", "mojo_system_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPhoneNumber(self):
    return self.SimpleTest("phonenumber", "libphonenumber_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIBaseUnit(self):
    return self.SimpleTest("chrome", "ui_base_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")

  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000"]

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
  def TestLayoutChunk(self, chunk_num, chunk_size):
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests.  Wrap around to beginning of list at end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as argument, it is used as the list of tests.
    assert((chunk_size == 0) != (len(self._args) == 0))
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #   python valgrind_test.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run-webkit-tests commandline.
    # Store each chunk in its own directory so that we can find the data later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
                          "Scripts", "run-webkit-tests")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes.  Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  # Run a separate DumpRenderTree for each test.
                  "--batch-size=1",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=800000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run-webkit-tests.  We aren't passed it directly,
    # so parse it out of build_dir.  run-webkit-tests can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if chunk_size > 0:
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # If the arg is a txt file, then treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.extend(script_cmd)

    # Layout tests often fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret

  def TestLayout(self):
    # A "chunk file" is maintained in the local directory so that each test
    # runs a slice of the layout tests of size chunk_size that increments with
    # each run.  Since tests can be added and removed from the layout tests at
    # any time, this is not going to give exact coverage, but it will allow us
    # to continuously run small slices of the layout tests under valgrind
    # rather than having to run all of them in one shot.
    chunk_size = self._options.num_tests
    if chunk_size == 0 or len(self._args):
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = os.path.join("valgrind_layout_chunk.txt")
    logging.info("Reading state from " + chunk_file)
    try:
      f = open(chunk_file)
      if f:
        chunk_str = f.read()
        if len(chunk_str):
          chunk_num = int(chunk_str)
          # This should be enough so that we have a couple of complete runs
          # of test data stored in the archive (although note that when we
          # loop we are almost guaranteed not to be at the end of the test
          # list).
          if chunk_num > 10000:
            chunk_num = 0
        f.close()
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    # Save the new chunk number before running the tests. Otherwise, if a
    # particular chunk hangs the bot, the chunk number will never get
    # incremented and the bot will be wedged.
    logging.info("Saving state to " + chunk_file)
    try:
      f = open(chunk_file, "w")
      chunk_num += 1
      f.write("%d" % chunk_num)
      f.close()
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them.  These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return self.TestLayoutChunk(chunk_num, chunk_size)

  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  _test_list = {
    "cmdline": RunCmdLine,
    "addressinput": TestAddressInput,
    "libaddressinput_unittests": TestAddressInput,
    "accessibility": TestAccessibility,
    "angle": TestAngle, "angle_unittests": TestAngle,
    "app_list": TestAppList, "app_list_unittests": TestAppList,
    "ash": TestAsh, "ash_unittests": TestAsh,
    "ash_shell": TestAshShell, "ash_shell_unittests": TestAshShell,
    "aura": TestAura, "aura_unittests": TestAura,
    "base": TestBase, "base_unittests": TestBase,
    "blink_heap": TestBlinkHeap,
    "blink_platform": TestBlinkPlatform,
    "browser": TestBrowser, "browser_tests": TestBrowser,
    "cacheinvalidation": TestCacheInvalidation,
    "cacheinvalidation_unittests": TestCacheInvalidation,
    "cast": TestCast, "cast_unittests": TestCast,
    "cc": TestCC, "cc_unittests": TestCC,
    "chrome_app": TestChromeApp,
    "chrome_elf": TestChromeElf,
    "chromedriver": TestChromeDriver,
    "chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
    "cloud_print": TestCloudPrint,
    "cloud_print_unittests": TestCloudPrint,
    "components": TestComponents, "components_unittests": TestComponents,
    "compositor": TestCompositor, "compositor_unittests": TestCompositor,
    "content": TestContent, "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette, "courgette_unittests": TestCourgette,
    "crypto": TestCrypto, "crypto_unittests": TestCrypto,
    "device": TestDevice, "device_unittests": TestDevice,
    "display": TestDisplay, "display_unittests": TestDisplay,
    "events": TestEvents, "events_unittests": TestEvents,
    "extensions": TestExtensions, "extensions_unittests": TestExtensions,
    "ffmpeg": TestFFmpeg, "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gcm": TestGCM, "gcm_unit_tests": TestGCM,
    "gin": TestGin, "gin_unittests": TestGin,
    "gfx": TestGfx, "gfx_unittests": TestGfx,
    "google_apis": TestGoogleApis,
    "gpu": TestGPU, "gpu_unittests": TestGPU,
    "ipc": TestIpc, "ipc_tests": TestIpc,
    "installer_util": TestInstallerUtil,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle, "jingle_unittests": TestJingle,
    "keyboard": TestKeyboard, "keyboard_unittests": TestKeyboard,
    "layout": TestLayout, "layout_tests": TestLayout,
    "media": TestMedia, "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests": TestMessageCenter,
    "mojo_common": TestMojoCommon,
    "mojo_system": TestMojoSystem,
    "mojo_public_system": TestMojoPublicSystem,
    "mojo_public_utility": TestMojoPublicUtility,
    "mojo_public_bindings": TestMojoPublicBindings,
    "mojo_public_env": TestMojoPublicEnv,
    "mojo_public_sysperf": TestMojoPublicSysPerf,
    "net": TestNet, "net_unittests": TestNet,
    "net_perf": TestNetPerf, "net_perftests": TestNetPerf,
    "phonenumber": TestPhoneNumber,
    "libphonenumber_unittests": TestPhoneNumber,
    "ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting, "printing_unittests": TestPrinting,
    "remoting": TestRemoting, "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "sql": TestSql, "sql_unittests": TestSql,
    "sync": TestSync, "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
    "unit": TestUnit, "unit_tests": TestUnit,
    "url": TestURL, "url_unittests": TestURL,
    "views": TestViews, "views_unittests": TestViews,
    "webkit": TestLayout,
  }


def _main():
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")
  parser.add_option("--help-tests", dest="help_tests", action="store_true",
                    default=False, help="List all available tests")
  parser.add_option("-b", "--build-dir",
                    help="the location of the compiler output")
  parser.add_option("--target", help="Debug or Release")
  parser.add_option("-t", "--test", action="append", default=[],
                    help="which test to run, supports test:gtest_filter format "
                         "as well.")
  parser.add_option("--baseline", action="store_true", default=False,
                    help="generate baseline data instead of validating")
  parser.add_option("--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
  parser.add_option("--gtest_shuffle", action="store_true", default=False,
                    help="randomize the test order on every iteration")
  parser.add_option("--gtest_break_on_failure", action="store_true",
                    help="drop into the debugger on assertion failure. Also "
                         "useful for forcing tests to exit with a stack dump "
                         "on the first assertion failure when running with "
                         "--gtest_repeat=-1")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
                    help="specify a valgrind tool to run the tests under")
  parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
                    help="specify custom flags for the selected valgrind tool")
  parser.add_option("--keep_logs", action="store_true", default=False,
                    help="store memory tool logs in the <tool>.logs directory "
                         "instead of /tmp.\nThis can be useful for tool "
                         "developers/maintainers.\nPlease note that the <tool>"
                         ".logs directory will be clobbered on tool startup.")
  parser.add_option("-n", "--num_tests", type="int",
                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                    help="for layout tests: # of subtests per run. 0 for all.")
  # TODO(thestig) Remove this if we can.
  parser.add_option("--gtest_color", dest="gtest_color", default="no",
                    help="dummy compatibility flag for sharding_supervisor.")
  parser.add_option("--brave-new-test-launcher", action="store_true",
                    help="run the tests with --brave-new-test-launcher")
  parser.add_option("--test-launcher-bot-mode", action="store_true",
                    help="run the tests with --test-launcher-bot-mode")
  options, args = parser.parse_args()

  # Bake target into build_dir.
  if options.target and options.build_dir:
    assert (options.target !=
            os.path.basename(os.path.dirname(options.build_dir)))
    options.build_dir = os.path.join(os.path.abspath(options.build_dir),
                                     options.target)

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    if ret:
      return ret
  return 0


if __name__ == "__main__":
  sys.exit(_main())