# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through valgrind_test.py.'''
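
# Example invocations (tool, test and path values below are illustrative; see
# the option parser at the bottom of this file for the full set of flags):
#   tools/valgrind/chrome_tests.py --tool=memcheck -t base_unittests \
#       --build-dir=out/Debug
#   tools/valgrind/chrome_tests.py --tool=drmemory -t "base:SomeSuite.*"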

import glob
import logging
import multiprocessing
import optparse
import os
import subprocess
import sys

import logging_utils
import path_utils

import common
import valgrind_test

class TestNotFound(Exception): pass

class MultipleGTestFiltersSpecified(Exception): pass

class BuildDirNotFound(Exception): pass

class BuildDirAmbiguous(Exception): pass

class ExecutableNotFound(Exception): pass

class BadBinary(Exception): pass

class ChromeTests:
  SLOW_TOOLS = ["memcheck", "drmemory"]
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300

  def __init__(self, options, args, test):
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives). We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Since this path is used for string matching, make sure it's always
    # an absolute Unix-style path.
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]

    if not self._options.build_dir:
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build-dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]

  def _EnsureBuildDirFound(self):
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build-dir")

  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
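    # For example, with --tool=memcheck this picks up
    # tools/valgrind/memcheck/suppressions.txt plus any per-platform file such
    # as suppressions_linux.txt (platform names come from
    # common.PlatformNames(); the Linux name is shown only for illustration).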
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppressions.
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args != None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      exe_path = os.path.join(self._options.build_dir, exe)
      if not os.path.exists(exe_path):
        raise ExecutableNotFound("Couldn't find '%s'" % exe_path)

      # Make sure we don't try to test ASan-built binaries
      # with other dynamic instrumentation-based tools.
      # TODO(timurrrr): also check TSan and MSan?
      # `nm` might not be available, so use try-except.
      try:
        # Do not perform this check on OS X, as 'nm' on 10.6 can't handle
        # binaries built with Clang 3.5+.
        if not common.IsMac():
          nm_output = subprocess.check_output(["nm", exe_path])
          if nm_output.find("__asan_init") != -1:
            raise BadBinary("You're trying to run an executable instrumented "
                            "with AddressSanitizer under %s. Please provide "
                            "an uninstrumented executable." % tool_name)
      except OSError:
        pass

      cmd.append(exe_path)
      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
      # The built-in test launcher for gtest-based executables runs tests in
      # multiple processes by default. Force single-process mode back on.
      cmd.append("--single-process-tests")
      if self._options.gtest_repeat:
        cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
      if self._options.gtest_shuffle:
        cmd.append("--gtest_shuffle")
      if self._options.gtest_break_on_failure:
        cmd.append("--gtest_break_on_failure")
      if self._options.test_launcher_bot_mode:
        cmd.append("--test-launcher-bot-mode")
      if self._options.test_launcher_total_shards is not None:
        cmd.append("--test-launcher-total-shards=%d"
                   % self._options.test_launcher_total_shards)
      if self._options.test_launcher_shard_index is not None:
        cmd.append("--test-launcher-shard-index=%d"
                   % self._options.test_launcher_shard_index)
    return cmd

  def Run(self):
    ''' Runs the test specified by command-line argument --test '''
    logging.info("running test %s" % (self._test))
    return self._test_list[self._test](self)

  def _AppendGtestFilter(self, tool, name, cmd):
    '''Appends an appropriate --gtest_filter flag to the googletest binary
       invocation.
       If the user passed their own filter mentioning only one test, just use
       it. Otherwise, filter out tests listed in the appropriate gtest_exclude
       files.'''
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # Strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info("  \"%s\" - not found" % readable_filename)
        continue
      logging.info("  \"%s\" - OK" % readable_filename)
      f = open(filename, 'r')
      for line in f.readlines():
        if line.startswith("#") or line.startswith("//") or line.isspace():
          continue
        line = line.rstrip()
        test_prefixes = ["FLAKY", "FAILS"]
        for p in test_prefixes:
          # Strip prefixes from the test names.
          line = line.replace(".%s_" % p, ".")
        # Exclude the original test name.
        filters.append(line)
        if line[-2:] != ".*":
          # List all possible prefixes if line doesn't end with ".*".
          for p in test_prefixes:
            filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
    if len(filters):
      if gtest_filter:
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)

  @staticmethod
  def ShowTests():
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=lambda name: len(name))
      name_to_aliases[names[0]] = names[1:]

    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print " {} (aka {})".format(name, ', '.join(aliases))
      else:
        print " {}".format(name)

  def SetupLdPath(self, requires_build_dir):
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return

    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
    if (os.getenv("LD_LIBRARY_PATH")):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)

  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)

    self.SetupLdPath(True)
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)

  def TestAccessibility(self):
    return self.SimpleTest("accessibility", "accessibility_unittests")

  def TestAddressInput(self):
    return self.SimpleTest("addressinput", "libaddressinput_unittests")

  def TestAngle(self):
    return self.SimpleTest("angle", "angle_unittests")

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestBlinkHeap(self):
    return self.SimpleTest("blink_heap", "blink_heap_unittests")

  def TestBlinkPlatform(self):
    return self.SimpleTest("blink_platform", "blink_platform_unittests")

  def TestCacheInvalidation(self):
    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")

  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")

  def TestCC(self):
    return self.SimpleTest("cc", "cc_unittests")

  def TestChromeApp(self):
    return self.SimpleTest("chrome_app", "chrome_app_unittests")

  def TestChromeElf(self):
    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")

  def TestChromeDriver(self):
    return self.SimpleTest("chromedriver", "chromedriver_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestCloudPrint(self):
    return self.SimpleTest("cloud_print", "cloud_print_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestDisplay(self):
    return self.SimpleTest("display", "display_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestExtensions(self):
    return self.SimpleTest("extensions", "extensions_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")

  def TestGfx(self):
    return self.SimpleTest("gfx", "gfx_unittests")

  def TestGin(self):
    return self.SimpleTest("gin", "gin_unittests")

  def TestGoogleApis(self):
    return self.SimpleTest("google_apis", "google_apis_unittests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestInstallerUtil(self):
    return self.SimpleTest("installer_util", "installer_util_unittests")

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestKeyboard(self):
    return self.SimpleTest("keyboard", "keyboard_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestMidi(self):
    return self.SimpleTest("chrome", "midi_unittests")

  def TestMojoCommon(self):
    return self.SimpleTest("mojo_common", "mojo_common_unittests")

  def TestMojoPublicBindings(self):
    return self.SimpleTest("mojo_public_bindings",
                           "mojo_public_bindings_unittests")

  def TestMojoPublicEnv(self):
    return self.SimpleTest("mojo_public_env",
                           "mojo_public_environment_unittests")

  def TestMojoPublicSystem(self):
    return self.SimpleTest("mojo_public_system",
                           "mojo_public_system_unittests")

  def TestMojoPublicSysPerf(self):
    return self.SimpleTest("mojo_public_sysperf",
                           "mojo_public_system_perftests")

  def TestMojoPublicUtility(self):
    return self.SimpleTest("mojo_public_utility",
                           "mojo_public_utility_unittests")

  def TestMojoSystem(self):
    return self.SimpleTest("mojo_system", "mojo_system_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPhoneNumber(self):
    return self.SimpleTest("phonenumber", "libphonenumber_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSkia(self):
    return self.SimpleTest("skia", "skia_unittests")

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIBaseUnit(self):
    return self.SimpleTest("chrome", "ui_base_unittests")

  def TestUIChromeOS(self):
    return self.SimpleTest("chrome", "ui_chromeos_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")

  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000"]

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestLayoutChunk(self, chunk_num, chunk_size):
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests. Wrap around to beginning of list at end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as argument, it is used as the list of tests.
    assert((chunk_size == 0) != (len(self._args) == 0))
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #   python valgrind_test.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run-webkit-tests commandline.
    # Store each chunk in its own directory so that we can find the data later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
                          "Scripts", "run-webkit-tests")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes. Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  # Run a separate DumpRenderTree for each test.
                  "--batch-size=1",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=800000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps",
                  "--additional-driver-flag=--no-sandbox"]
    # Pass build mode to run-webkit-tests. We aren't passed it directly,
    # so parse it out of build_dir. run-webkit-tests can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if chunk_size > 0:
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # If the arg is a txt file, then treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.extend(script_cmd)

    # Layout tests often fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret

  def TestLayout(self):
    # A "chunk file" is maintained in the local directory so that each test
    # runs a slice of the layout tests of size chunk_size that increments with
    # each run. Since tests can be added and removed from the layout tests at
    # any time, this is not going to give exact coverage, but it will allow us
    # to continuously run small slices of the layout tests under valgrind
    # rather than having to run all of them in one shot.
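    # For example, with the default chunk size of 300, one run covers layout
    # tests [300*N, 300*(N+1)) for the chunk number N recorded in the chunk
    # file, and a later run picks up the next chunk (the numbers here are only
    # illustrative; see TestLayoutChunk above for the exact chunk semantics).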
    chunk_size = self._options.num_tests
    if chunk_size == 0 or len(self._args):
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = os.path.join("valgrind_layout_chunk.txt")
    logging.info("Reading state from " + chunk_file)
    try:
      f = open(chunk_file)
      if f:
        chunk_str = f.read()
        if len(chunk_str):
          chunk_num = int(chunk_str)
        # This should be enough so that we have a couple of complete runs
        # of test data stored in the archive (although note that when we loop
        # we are almost guaranteed not to be at the end of the test list).
        if chunk_num > 10000:
          chunk_num = 0
        f.close()
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    # Save the new chunk number before running the tests. Otherwise if a
    # particular chunk hangs the bot, the chunk number will never get
    # incremented and the bot will be wedged.
    logging.info("Saving state to " + chunk_file)
    try:
      f = open(chunk_file, "w")
      chunk_num += 1
      f.write("%d" % chunk_num)
      f.close()
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them. These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return self.TestLayoutChunk(chunk_num, chunk_size)

  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  _test_list = {
    "cmdline": RunCmdLine,
    "addressinput": TestAddressInput,
    "libaddressinput_unittests": TestAddressInput,
    "accessibility": TestAccessibility,
    "angle": TestAngle, "angle_unittests": TestAngle,
    "app_list": TestAppList, "app_list_unittests": TestAppList,
    "ash": TestAsh, "ash_unittests": TestAsh,
    "aura": TestAura, "aura_unittests": TestAura,
    "base": TestBase, "base_unittests": TestBase,
    "blink_heap": TestBlinkHeap,
    "blink_platform": TestBlinkPlatform,
    "browser": TestBrowser, "browser_tests": TestBrowser,
    "cacheinvalidation": TestCacheInvalidation,
    "cacheinvalidation_unittests": TestCacheInvalidation,
    "cast": TestCast, "cast_unittests": TestCast,
    "cc": TestCC, "cc_unittests": TestCC,
    "chrome_app": TestChromeApp,
    "chrome_elf": TestChromeElf,
    "chromedriver": TestChromeDriver,
    "chromeos": TestChromeOS, "chromeos_unittests": TestChromeOS,
    "cloud_print": TestCloudPrint,
    "cloud_print_unittests": TestCloudPrint,
    "components": TestComponents, "components_unittests": TestComponents,
    "compositor": TestCompositor, "compositor_unittests": TestCompositor,
    "content": TestContent, "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette, "courgette_unittests": TestCourgette,
    "crypto": TestCrypto, "crypto_unittests": TestCrypto,
    "device": TestDevice, "device_unittests": TestDevice,
    "display": TestDisplay, "display_unittests": TestDisplay,
    "events": TestEvents, "events_unittests": TestEvents,
    "extensions": TestExtensions, "extensions_unittests": TestExtensions,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gcm": TestGCM, "gcm_unit_tests": TestGCM,
    "gin": TestGin, "gin_unittests": TestGin,
    "gfx": TestGfx, "gfx_unittests": TestGfx,
    "google_apis": TestGoogleApis,
    "gpu": TestGPU, "gpu_unittests": TestGPU,
    "ipc": TestIpc, "ipc_tests": TestIpc,
    "installer_util": TestInstallerUtil,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle, "jingle_unittests": TestJingle,
    "keyboard": TestKeyboard, "keyboard_unittests": TestKeyboard,
    "layout": TestLayout, "layout_tests": TestLayout,
    "media": TestMedia, "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests": TestMessageCenter,
    "midi": TestMidi, "midi_unittests": TestMidi,
    "mojo_common": TestMojoCommon,
    "mojo_system": TestMojoSystem,
    "mojo_public_system": TestMojoPublicSystem,
    "mojo_public_utility": TestMojoPublicUtility,
    "mojo_public_bindings": TestMojoPublicBindings,
    "mojo_public_env": TestMojoPublicEnv,
    "mojo_public_sysperf": TestMojoPublicSysPerf,
    "net": TestNet, "net_unittests": TestNet,
    "net_perf": TestNetPerf, "net_perftests": TestNetPerf,
    "phonenumber": TestPhoneNumber,
    "libphonenumber_unittests": TestPhoneNumber,
    "ppapi": TestPPAPI, "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting, "printing_unittests": TestPrinting,
    "remoting": TestRemoting, "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "skia": TestSkia, "skia_unittests": TestSkia,
    "sql": TestSql, "sql_unittests": TestSql,
    "sync": TestSync, "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
    "ui_chromeos": TestUIChromeOS, "ui_chromeos_unittests": TestUIChromeOS,
    "unit": TestUnit, "unit_tests": TestUnit,
    "url": TestURL, "url_unittests": TestURL,
    "views": TestViews, "views_unittests": TestViews,
    "webkit": TestLayout,
  }


def _main():
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")

  parser.add_option("--help-tests", dest="help_tests", action="store_true",
                    default=False, help="List all available tests")
  parser.add_option("-b", "--build-dir",
                    help="the location of the compiler output")
  parser.add_option("--target", help="Debug or Release")
  parser.add_option("-t", "--test", action="append", default=[],
                    help="which test to run, supports test:gtest_filter format "
                         "as well.")
  parser.add_option("--baseline", action="store_true", default=False,
                    help="generate baseline data instead of validating")
  parser.add_option("--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
  parser.add_option("--gtest_shuffle", action="store_true", default=False,
                    help="Randomize tests' orders on every iteration.")
  parser.add_option("--gtest_break_on_failure", action="store_true",
                    default=False,
                    help="Drop in to debugger on assertion failure. Also "
                         "useful for forcing tests to exit with a stack dump "
                         "on the first assertion failure when running with "
                         "--gtest_repeat=-1")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
                    help="specify a valgrind tool to run the tests under")
  parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
                    help="specify custom flags for the selected valgrind tool")
  parser.add_option("--keep_logs", action="store_true", default=False,
                    help="store memory tool logs in the <tool>.logs directory "
                         "instead of /tmp.\nThis can be useful for tool "
                         "developers/maintainers.\nPlease note that the <tool>"
                         ".logs directory will be clobbered on tool startup.")
  parser.add_option("-n", "--num_tests", type="int",
                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                    help="for layout tests: # of subtests per run. 0 for all.")
  parser.add_option("--test-launcher-bot-mode", action="store_true",
                    help="run the tests with --test-launcher-bot-mode")
  parser.add_option("--test-launcher-total-shards", type=int,
                    help="run the tests with --test-launcher-total-shards")
  parser.add_option("--test-launcher-shard-index", type=int,
                    help="run the tests with --test-launcher-shard-index")

  options, args = parser.parse_args()

  # Bake target into build_dir.
  if options.target and options.build_dir:
    assert (options.target !=
            os.path.basename(os.path.dirname(options.build_dir)))
    options.build_dir = os.path.join(os.path.abspath(options.build_dir),
                                     options.target)

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    if ret:
      return ret
  return 0
== "__main__":