# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import multiprocessing
import optparse
import os
import sys
22 class TestNotFound(Exception): pass
24 class MultipleGTestFiltersSpecified(Exception): pass
26 class BuildDirNotFound(Exception): pass
28 class BuildDirAmbiguous(Exception): pass
31 SLOW_TOOLS
= ["memcheck", "tsan", "tsan_rv", "drmemory"]
32 LAYOUT_TESTS_DEFAULT_CHUNK_SIZE
= 500
34 def __init__(self
, options
, args
, test
):
36 (self
._test
, self
._gtest
_filter
) = test
.split(':', 1)
39 self
._gtest
_filter
= options
.gtest_filter
41 if self
._test
not in self
._test
_list
:
42 raise TestNotFound("Unknown test: %s" % test
)
44 if options
.gtest_filter
and options
.gtest_filter
!= self
._gtest
_filter
:
45 raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
46 "and --test %s" % test
)
48 self
._options
= options
51 script_dir
= path_utils
.ScriptDir()
52 # Compute the top of the tree (the "source dir") from the script dir (where
53 # this script lives). We assume that the script dir is in tools/valgrind/
54 # relative to the top of the tree.
55 self
._source
_dir
= os
.path
.dirname(os
.path
.dirname(script_dir
))
56 # since this path is used for string matching, make sure it's always
57 # an absolute Unix-style path
58 self
._source
_dir
= os
.path
.abspath(self
._source
_dir
).replace('\\', '/')
59 valgrind_test_script
= os
.path
.join(script_dir
, "valgrind_test.py")
60 self
._command
_preamble
= ["--source_dir=%s" % (self
._source
_dir
)]
62 if not self
._options
.build_dir
:
64 os
.path
.join(self
._source
_dir
, "xcodebuild", "Debug"),
65 os
.path
.join(self
._source
_dir
, "out", "Debug"),
66 os
.path
.join(self
._source
_dir
, "build", "Debug"),
68 build_dir
= [d
for d
in dirs
if os
.path
.isdir(d
)]
69 if len(build_dir
) > 1:
70 raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
71 "%s\nPlease specify just one "
72 "using --build_dir" % ", ".join(build_dir
))
74 self
._options
.build_dir
= build_dir
[0]
76 self
._options
.build_dir
= None
78 if self
._options
.build_dir
:
79 build_dir
= os
.path
.abspath(self
._options
.build_dir
)
80 self
._command
_preamble
+= ["--build_dir=%s" % (self
._options
.build_dir
)]
82 def _EnsureBuildDirFound(self
):
83 if not self
._options
.build_dir
:
84 raise BuildDirNotFound("Oops, couldn't find a build dir, please "
85 "specify it manually using --build_dir")
87 def _DefaultCommand(self
, tool
, exe
=None, valgrind_test_args
=None):
88 '''Generates the default command array that most tests will use.'''
89 if exe
and common
.IsWindows():
92 cmd
= list(self
._command
_preamble
)
94 # Find all suppressions matching the following pattern:
95 # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
96 # and list them with --suppressions= prefix.
97 script_dir
= path_utils
.ScriptDir()
98 tool_name
= tool
.ToolName();
99 suppression_file
= os
.path
.join(script_dir
, tool_name
, "suppressions.txt")
100 if os
.path
.exists(suppression_file
):
101 cmd
.append("--suppressions=%s" % suppression_file
)
102 # Platform-specific suppression
103 for platform
in common
.PlatformNames():
104 platform_suppression_file
= \
105 os
.path
.join(script_dir
, tool_name
, 'suppressions_%s.txt' % platform
)
106 if os
.path
.exists(platform_suppression_file
):
107 cmd
.append("--suppressions=%s" % platform_suppression_file
)
109 if self
._options
.valgrind_tool_flags
:
110 cmd
+= self
._options
.valgrind_tool_flags
.split(" ")
111 if self
._options
.keep_logs
:
112 cmd
+= ["--keep_logs"]
113 if valgrind_test_args
!= None:
114 for arg
in valgrind_test_args
:
117 self
._EnsureBuildDirFound
()
118 cmd
.append(os
.path
.join(self
._options
.build_dir
, exe
))
119 # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
120 # so we can find the slowpokes.
121 cmd
.append("--gtest_print_time")
122 if self
._options
.gtest_repeat
:
123 cmd
.append("--gtest_repeat=%s" % self
._options
.gtest_repeat
)
127 ''' Runs the test specified by command-line argument --test '''
128 logging
.info("running test %s" % (self
._test
))
129 return self
._test
_list
[self
._test
](self
)
131 def _AppendGtestFilter(self
, tool
, name
, cmd
):
132 '''Append an appropriate --gtest_filter flag to the googletest binary
134 If the user passed his own filter mentioning only one test, just use it.
135 Othewise, filter out tests listed in the appropriate gtest_exclude files.
137 if (self
._gtest
_filter
and
138 ":" not in self
._gtest
_filter
and
139 "?" not in self
._gtest
_filter
and
140 "*" not in self
._gtest
_filter
):
141 cmd
.append("--gtest_filter=%s" % self
._gtest
_filter
)
145 gtest_files_dir
= os
.path
.join(path_utils
.ScriptDir(), "gtest_exclude")
147 gtest_filter_files
= [
148 os
.path
.join(gtest_files_dir
, name
+ ".gtest-%s.txt" % tool
.ToolName())]
149 # Use ".gtest.txt" files only for slow tools, as they now contain
150 # Valgrind- and Dr.Memory-specific filters.
151 # TODO(glider): rename the files to ".gtest_slow.txt"
152 if tool
.ToolName() in ChromeTests
.SLOW_TOOLS
:
153 gtest_filter_files
+= [os
.path
.join(gtest_files_dir
, name
+ ".gtest.txt")]
154 for platform_suffix
in common
.PlatformNames():
155 gtest_filter_files
+= [
156 os
.path
.join(gtest_files_dir
, name
+ ".gtest_%s.txt" % platform_suffix
),
157 os
.path
.join(gtest_files_dir
, name
+ ".gtest-%s_%s.txt" % \
158 (tool
.ToolName(), platform_suffix
))]
159 logging
.info("Reading gtest exclude filter files:")
160 for filename
in gtest_filter_files
:
161 # strip the leading absolute path (may be very long on the bot)
162 # and the following / or \.
163 readable_filename
= filename
.replace("\\", "/") # '\' on Windows
164 readable_filename
= readable_filename
.replace(self
._source
_dir
, "")[1:]
165 if not os
.path
.exists(filename
):
166 logging
.info(" \"%s\" - not found" % readable_filename
)
168 logging
.info(" \"%s\" - OK" % readable_filename
)
169 f
= open(filename
, 'r')
170 for line
in f
.readlines():
171 if line
.startswith("#") or line
.startswith("//") or line
.isspace():
174 test_prefixes
= ["FLAKY", "FAILS"]
175 for p
in test_prefixes
:
176 # Strip prefixes from the test names.
177 line
= line
.replace(".%s_" % p
, ".")
178 # Exclude the original test name.
180 if line
[-2:] != ".*":
181 # List all possible prefixes if line doesn't end with ".*".
182 for p
in test_prefixes
:
183 filters
.append(line
.replace(".", ".%s_" % p
))
184 # Get rid of duplicates.
185 filters
= set(filters
)
186 gtest_filter
= self
._gtest
_filter
190 if gtest_filter
.find("-") < 0:
194 gtest_filter
+= ":".join(filters
)
196 cmd
.append("--gtest_filter=%s" % gtest_filter
)
201 for name
, test_function
in ChromeTests
._test
_list
.iteritems():
202 test_to_names
.setdefault(test_function
, []).append(name
)
205 for names
in test_to_names
.itervalues():
206 names
.sort(key
=lambda name
: len(name
))
207 name_to_aliases
[names
[0]] = names
[1:]
210 print "Available tests:"
211 print "----------------"
212 for name
, aliases
in sorted(name_to_aliases
.iteritems()):
214 print " {} (aka {})".format(name
, ', '.join(aliases
))
216 print " {}".format(name
)
218 def SetupLdPath(self
, requires_build_dir
):
219 if requires_build_dir
:
220 self
._EnsureBuildDirFound
()
221 elif not self
._options
.build_dir
:
224 # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
225 if (os
.getenv("LD_LIBRARY_PATH")):
226 os
.putenv("LD_LIBRARY_PATH", "%s:%s" % (os
.getenv("LD_LIBRARY_PATH"),
227 self
._options
.build_dir
))
229 os
.putenv("LD_LIBRARY_PATH", self
._options
.build_dir
)
231 def SimpleTest(self
, module
, name
, valgrind_test_args
=None, cmd_args
=None):
232 tool
= valgrind_test
.CreateTool(self
._options
.valgrind_tool
)
233 cmd
= self
._DefaultCommand
(tool
, name
, valgrind_test_args
)
234 self
._AppendGtestFilter
(tool
, name
, cmd
)
235 cmd
.extend(['--test-tiny-timeout=1000'])
239 self
.SetupLdPath(True)
240 return tool
.Run(cmd
, module
)
242 def RunCmdLine(self
):
243 tool
= valgrind_test
.CreateTool(self
._options
.valgrind_tool
)
244 cmd
= self
._DefaultCommand
(tool
, None, self
._args
)
245 self
.SetupLdPath(False)
246 return tool
.Run(cmd
, None)
248 def TestAppList(self
):
249 return self
.SimpleTest("app_list", "app_list_unittests")
252 return self
.SimpleTest("ash", "ash_unittests")
255 return self
.SimpleTest("aura", "aura_unittests")
258 return self
.SimpleTest("base", "base_unittests")
260 def TestChromeOS(self
):
261 return self
.SimpleTest("chromeos", "chromeos_unittests")
263 def TestComponents(self
):
264 return self
.SimpleTest("components", "components_unittests")
266 def TestCompositor(self
):
267 return self
.SimpleTest("compositor", "compositor_unittests")
269 def TestContent(self
):
270 return self
.SimpleTest("content", "content_unittests")
272 def TestContentBrowser(self
):
273 return self
.SimpleTest("content", "content_browsertests")
275 def TestCourgette(self
):
276 return self
.SimpleTest("courgette", "courgette_unittests")
278 def TestCrypto(self
):
279 return self
.SimpleTest("crypto", "crypto_unittests")
281 def TestDevice(self
):
282 return self
.SimpleTest("device", "device_unittests")
284 def TestFFmpeg(self
):
285 return self
.SimpleTest("chrome", "ffmpeg_unittests")
287 def TestFFmpegRegressions(self
):
288 return self
.SimpleTest("chrome", "ffmpeg_regression_tests")
291 return self
.SimpleTest("gpu", "gpu_unittests")
294 return self
.SimpleTest("ipc", "ipc_tests",
295 valgrind_test_args
=["--trace_children"])
297 def TestJingle(self
):
298 return self
.SimpleTest("chrome", "jingle_unittests")
301 return self
.SimpleTest("chrome", "media_unittests")
303 def TestMessageCenter(self
):
304 return self
.SimpleTest("message_center", "message_center_unittests")
307 return self
.SimpleTest("net", "net_unittests")
309 def TestNetPerf(self
):
310 return self
.SimpleTest("net", "net_perftests")
313 return self
.SimpleTest("chrome", "ppapi_unittests")
315 def TestPrinting(self
):
316 return self
.SimpleTest("chrome", "printing_unittests")
318 def TestRemoting(self
):
319 return self
.SimpleTest("chrome", "remoting_unittests",
321 "--ui-test-action-timeout=60000",
322 "--ui-test-action-max-timeout=150000"])
325 return self
.SimpleTest("chrome", "sql_unittests")
328 return self
.SimpleTest("chrome", "sync_unit_tests")
330 def TestLinuxSandbox(self
):
331 return self
.SimpleTest("sandbox", "sandbox_linux_unittests")
334 # http://crbug.com/51716
335 # Disabling all unit tests
336 # Problems reappeared after r119922
337 if common
.IsMac() and (self
._options
.valgrind_tool
== "memcheck"):
338 logging
.warning("unit_tests are disabled for memcheck on MacOS.")
340 return self
.SimpleTest("chrome", "unit_tests")
342 def TestUIUnit(self
):
343 return self
.SimpleTest("chrome", "ui_unittests")
346 return self
.SimpleTest("chrome", "url_unittests")
349 return self
.SimpleTest("views", "views_unittests")
351 # Valgrind timeouts are in seconds.
352 UI_VALGRIND_ARGS
= ["--timeout=14400", "--trace_children", "--indirect"]
353 # UI test timeouts are in milliseconds.
354 UI_TEST_ARGS
= ["--ui-test-action-timeout=60000",
355 "--ui-test-action-max-timeout=150000",
358 # TODO(thestig) fine-tune these values.
359 # Valgrind timeouts are in seconds.
360 BROWSER_VALGRIND_ARGS
= ["--timeout=50000", "--trace_children", "--indirect"]
361 # Browser test timeouts are in milliseconds.
362 BROWSER_TEST_ARGS
= ["--ui-test-action-timeout=400000",
363 "--ui-test-action-max-timeout=800000",
366 def TestAutomatedUI(self
):
367 return self
.SimpleTest("chrome", "automated_ui_tests",
368 valgrind_test_args
=self
.UI_VALGRIND_ARGS
,
369 cmd_args
=self
.UI_TEST_ARGS
)
371 def TestBrowser(self
):
372 return self
.SimpleTest("chrome", "browser_tests",
373 valgrind_test_args
=self
.BROWSER_VALGRIND_ARGS
,
374 cmd_args
=self
.BROWSER_TEST_ARGS
)
376 def TestInteractiveUI(self
):
377 return self
.SimpleTest("chrome", "interactive_ui_tests",
378 valgrind_test_args
=self
.UI_VALGRIND_ARGS
,
379 cmd_args
=self
.UI_TEST_ARGS
)
381 def TestReliability(self
):
382 script_dir
= path_utils
.ScriptDir()
383 url_list_file
= os
.path
.join(script_dir
, "reliability", "url_list.txt")
384 return self
.SimpleTest("chrome", "reliability_tests",
385 valgrind_test_args
=self
.UI_VALGRIND_ARGS
,
386 cmd_args
=(self
.UI_TEST_ARGS
+
387 ["--list=%s" % url_list_file
]))
389 def TestSafeBrowsing(self
):
390 return self
.SimpleTest("chrome", "safe_browsing_tests",
391 valgrind_test_args
=self
.UI_VALGRIND_ARGS
,
392 cmd_args
=(["--ui-test-action-max-timeout=450000"]))
394 def TestSyncIntegration(self
):
395 return self
.SimpleTest("chrome", "sync_integration_tests",
396 valgrind_test_args
=self
.UI_VALGRIND_ARGS
,
397 cmd_args
=(["--ui-test-action-max-timeout=450000"]))
399 def TestLayoutChunk(self
, chunk_num
, chunk_size
):
400 # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
401 # list of tests. Wrap around to beginning of list at end.
402 # If chunk_size is zero, run all tests in the list once.
403 # If a text file is given as argument, it is used as the list of tests.
405 # Build the ginormous commandline in 'cmd'.
406 # It's going to be roughly
407 # python valgrind_test.py ... python run_webkit_tests.py ...
408 # but we'll use the --indirect flag to valgrind_test.py
409 # to avoid valgrinding python.
410 # Start by building the valgrind_test.py commandline.
411 tool
= valgrind_test
.CreateTool(self
._options
.valgrind_tool
)
412 cmd
= self
._DefaultCommand
(tool
)
413 cmd
.append("--trace_children")
414 cmd
.append("--indirect_webkit_layout")
415 cmd
.append("--ignore_exit_code")
416 # Now build script_cmd, the run_webkits_tests.py commandline
417 # Store each chunk in its own directory so that we can find the data later
418 chunk_dir
= os
.path
.join("layout", "chunk_%05d" % chunk_num
)
419 out_dir
= os
.path
.join(path_utils
.ScriptDir(), "latest")
420 out_dir
= os
.path
.join(out_dir
, chunk_dir
)
421 if os
.path
.exists(out_dir
):
422 old_files
= glob
.glob(os
.path
.join(out_dir
, "*.txt"))
427 script
= os
.path
.join(self
._source
_dir
, "webkit", "tools", "layout_tests",
428 "run_webkit_tests.py")
429 # http://crbug.com/260627: After the switch to content_shell from DRT, each
430 # test now brings up 3 processes. Under Valgrind, they become memory bound
431 # and can eventually OOM if we don't reduce the total count.
432 jobs
= max(1, int(multiprocessing
.cpu_count() * 0.3))
433 script_cmd
= ["python", script
, "-v",
434 "--run-singly", # run a separate DumpRenderTree for each test
436 "--child-processes=%d" % jobs
,
437 "--time-out-ms=200000",
438 "--no-retry-failures", # retrying takes too much time
439 # http://crbug.com/176908: Don't launch a browser when done.
441 "--nocheck-sys-deps"]
442 # Pass build mode to run_webkit_tests.py. We aren't passed it directly,
443 # so parse it out of build_dir. run_webkit_tests.py can only handle
444 # the two values "Release" and "Debug".
445 # TODO(Hercules): unify how all our scripts pass around build mode
446 # (--mode / --target / --build_dir / --debug)
447 if self
._options
.build_dir
:
448 build_root
, mode
= os
.path
.split(self
._options
.build_dir
)
449 script_cmd
.extend(["--build-directory", build_root
, "--target", mode
])
451 script_cmd
.append("--run-chunk=%d:%d" % (chunk_num
, chunk_size
))
453 # if the arg is a txt file, then treat it as a list of tests
454 if os
.path
.isfile(self
._args
[0]) and self
._args
[0][-4:] == ".txt":
455 script_cmd
.append("--test-list=%s" % self
._args
[0])
457 script_cmd
.extend(self
._args
)
458 self
._AppendGtestFilter
(tool
, "layout", script_cmd
)
459 # Now run script_cmd with the wrapper in cmd
461 cmd
.extend(script_cmd
)
463 # Layout tests often times fail quickly, but the buildbot remains green.
464 # Detect this situation when running with the default chunk size.
465 if chunk_size
== self
.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE
:
466 min_runtime_in_seconds
=120
468 min_runtime_in_seconds
=0
469 ret
= tool
.Run(cmd
, "layout", min_runtime_in_seconds
=min_runtime_in_seconds
)
473 def TestLayout(self
):
474 # A "chunk file" is maintained in the local directory so that each test
475 # runs a slice of the layout tests of size chunk_size that increments with
476 # each run. Since tests can be added and removed from the layout tests at
477 # any time, this is not going to give exact coverage, but it will allow us
478 # to continuously run small slices of the layout tests under valgrind rather
479 # than having to run all of them in one shot.
480 chunk_size
= self
._options
.num_tests
481 if (chunk_size
== 0):
482 return self
.TestLayoutChunk(0, 0)
484 chunk_file
= os
.path
.join("valgrind_layout_chunk.txt")
485 logging
.info("Reading state from " + chunk_file
)
492 # This should be enough so that we have a couple of complete runs
493 # of test data stored in the archive (although note that when we loop
494 # that we almost guaranteed won't be at the end of the test list)
495 if chunk_num
> 10000:
498 except IOError, (errno
, strerror
):
499 logging
.error("error reading from file %s (%d, %s)" % (chunk_file
,
501 # Save the new chunk size before running the tests. Otherwise if a
502 # particular chunk hangs the bot, the chunk number will never get
503 # incremented and the bot will be wedged.
504 logging
.info("Saving state to " + chunk_file
)
506 f
= open(chunk_file
, "w")
508 f
.write("%d" % chunk_num
)
510 except IOError, (errno
, strerror
):
511 logging
.error("error writing to file %s (%d, %s)" % (chunk_file
, errno
,
513 # Since we're running small chunks of the layout tests, it's important to
514 # mark the ones that have errors in them. These won't be visible in the
515 # summary list for long, but will be useful for someone reviewing this bot.
516 return self
.TestLayoutChunk(chunk_num
, chunk_size
)
518 # The known list of tests.
519 # Recognise the original abbreviations as well as full executable names.
521 "cmdline" : RunCmdLine
,
522 "app_list": TestAppList
, "app_list_unittests": TestAppList
,
523 "ash": TestAsh
, "ash_unittests": TestAsh
,
524 "aura": TestAura
, "aura_unittests": TestAura
,
525 "automated_ui" : TestAutomatedUI
,
526 "base": TestBase
, "base_unittests": TestBase
,
527 "browser": TestBrowser
, "browser_tests": TestBrowser
,
528 "chromeos": TestChromeOS
, "chromeos_unittests": TestChromeOS
,
529 "components": TestComponents
,"components_unittests": TestComponents
,
530 "compositor": TestCompositor
,"compositor_unittests": TestCompositor
,
531 "content": TestContent
, "content_unittests": TestContent
,
532 "content_browsertests": TestContentBrowser
,
533 "courgette": TestCourgette
, "courgette_unittests": TestCourgette
,
534 "crypto": TestCrypto
, "crypto_unittests": TestCrypto
,
535 "device": TestDevice
, "device_unittests": TestDevice
,
536 "ffmpeg": TestFFmpeg
, "ffmpeg_unittests": TestFFmpeg
,
537 "ffmpeg_regression_tests": TestFFmpegRegressions
,
538 "gpu": TestGPU
, "gpu_unittests": TestGPU
,
539 "ipc": TestIpc
, "ipc_tests": TestIpc
,
540 "interactive_ui": TestInteractiveUI
,
541 "jingle": TestJingle
, "jingle_unittests": TestJingle
,
542 "layout": TestLayout
, "layout_tests": TestLayout
,
543 "webkit": TestLayout
,
544 "media": TestMedia
, "media_unittests": TestMedia
,
545 "message_center": TestMessageCenter
,
546 "message_center_unittests" : TestMessageCenter
,
547 "net": TestNet
, "net_unittests": TestNet
,
548 "net_perf": TestNetPerf
, "net_perftests": TestNetPerf
,
549 "ppapi": TestPPAPI
, "ppapi_unittests": TestPPAPI
,
550 "printing": TestPrinting
, "printing_unittests": TestPrinting
,
551 "reliability": TestReliability
, "reliability_tests": TestReliability
,
552 "remoting": TestRemoting
, "remoting_unittests": TestRemoting
,
553 "safe_browsing": TestSafeBrowsing
, "safe_browsing_tests": TestSafeBrowsing
,
554 "sandbox": TestLinuxSandbox
, "sandbox_linux_unittests": TestLinuxSandbox
,
555 "sql": TestSql
, "sql_unittests": TestSql
,
556 "sync": TestSync
, "sync_unit_tests": TestSync
,
557 "sync_integration_tests": TestSyncIntegration
,
558 "sync_integration": TestSyncIntegration
,
559 "ui_unit": TestUIUnit
, "ui_unittests": TestUIUnit
,
560 "unit": TestUnit
, "unit_tests": TestUnit
,
561 "url": TestURL
, "url_unittests": TestURL
,
562 "views": TestViews
, "views_unittests": TestViews
,
567 parser
= optparse
.OptionParser("usage: %prog -b <dir> -t <test> "
569 parser
.disable_interspersed_args()
571 parser
.add_option("", "--help-tests", dest
="help_tests", action
="store_true",
572 default
=False, help="List all available tests")
573 parser
.add_option("-b", "--build_dir",
574 help="the location of the compiler output")
575 parser
.add_option("-t", "--test", action
="append", default
=[],
576 help="which test to run, supports test:gtest_filter format "
578 parser
.add_option("", "--baseline", action
="store_true", default
=False,
579 help="generate baseline data instead of validating")
580 parser
.add_option("", "--gtest_filter",
581 help="additional arguments to --gtest_filter")
582 parser
.add_option("", "--gtest_repeat",
583 help="argument for --gtest_repeat")
584 parser
.add_option("-v", "--verbose", action
="store_true", default
=False,
585 help="verbose output - enable debug log messages")
586 parser
.add_option("", "--tool", dest
="valgrind_tool", default
="memcheck",
587 help="specify a valgrind tool to run the tests under")
588 parser
.add_option("", "--tool_flags", dest
="valgrind_tool_flags", default
="",
589 help="specify custom flags for the selected valgrind tool")
590 parser
.add_option("", "--keep_logs", action
="store_true", default
=False,
591 help="store memory tool logs in the <tool>.logs directory "
592 "instead of /tmp.\nThis can be useful for tool "
593 "developers/maintainers.\nPlease note that the <tool>"
594 ".logs directory will be clobbered on tool startup.")
595 parser
.add_option("-n", "--num_tests", type="int",
596 default
=ChromeTests
.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE
,
597 help="for layout tests: # of subtests per run. 0 for all.")
598 # TODO(thestig) Remove this if we can.
599 parser
.add_option("", "--gtest_color", dest
="gtest_color", default
="no",
600 help="dummy compatibility flag for sharding_supervisor.")
602 options
, args
= parser
.parse_args()
605 logging_utils
.config_root(logging
.DEBUG
)
607 logging_utils
.config_root()
609 if options
.help_tests
:
610 ChromeTests
.ShowTests()
614 parser
.error("--test not specified")
616 if len(options
.test
) != 1 and options
.gtest_filter
:
617 parser
.error("--gtest_filter and multiple tests don't make sense together")
619 for t
in options
.test
:
620 tests
= ChromeTests(options
, args
, t
)
626 if __name__
== "__main__":