// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/test/launcher/test_launcher.h"
11 #include "base/at_exit.h"
12 #include "base/bind.h"
13 #include "base/command_line.h"
14 #include "base/environment.h"
15 #include "base/files/file_path.h"
16 #include "base/files/file_util.h"
17 #include "base/files/scoped_file.h"
18 #include "base/format_macros.h"
19 #include "base/hash.h"
20 #include "base/lazy_instance.h"
21 #include "base/location.h"
22 #include "base/logging.h"
23 #include "base/memory/scoped_ptr.h"
24 #include "base/message_loop/message_loop.h"
25 #include "base/process/kill.h"
26 #include "base/process/launch.h"
27 #include "base/single_thread_task_runner.h"
28 #include "base/strings/pattern.h"
29 #include "base/strings/string_number_conversions.h"
30 #include "base/strings/string_split.h"
31 #include "base/strings/string_util.h"
32 #include "base/strings/stringize_macros.h"
33 #include "base/strings/stringprintf.h"
34 #include "base/strings/utf_string_conversions.h"
35 #include "base/test/gtest_util.h"
36 #include "base/test/launcher/test_results_tracker.h"
37 #include "base/test/sequenced_worker_pool_owner.h"
38 #include "base/test/test_switches.h"
39 #include "base/test/test_timeouts.h"
40 #include "base/thread_task_runner_handle.h"
41 #include "base/threading/thread_checker.h"
42 #include "base/time/time.h"
43 #include "testing/gtest/include/gtest/gtest.h"
#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
#endif  // defined(OS_MACOSX)

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#endif  // defined(OS_WIN)
// See https://groups.google.com/a/chromium.org/d/msg/chromium-dev/nkdTP7sstSc/uT3FaE_sgkAJ .
// The environment variable name for the total number of test shards.
const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
// The environment variable name for the test shard index.
const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
// Global tag for test runs where the results are incomplete or unreliable
// for any reason, e.g. early exit because of too many broken tests.
const char kUnreliableResultsTag[] = "UNRELIABLE_RESULTS";
// Maximum time of no output after which we print the list of processes still
// running. This deliberately doesn't use TestTimeouts (which is otherwise
// a recommended solution), because they can be increased. That would defeat
// the purpose of this timeout, which is 1) to avoid the buildbot "no output
// for X seconds" timeout killing the process 2) to help communicate the status
// of the test launcher to people looking at the output (no output for a long
// time is mysterious and gives no info about what is happening) 3) to help
// debugging in case the process hangs anyway.
const int kOutputTimeoutSeconds = 15;
// Limit of output snippet lines when printing to stdout.
// Avoids flooding the logs with an amount of output that gums up
// the infrastructure.
const size_t kOutputSnippetLinesLimit = 5000;
// Set of live launched test processes with corresponding lock (it is allowed
// for callers to launch processes on different threads).
LazyInstance<std::map<ProcessHandle, CommandLine> > g_live_processes =
    LAZY_INSTANCE_INITIALIZER;
LazyInstance<Lock> g_live_processes_lock = LAZY_INSTANCE_INITIALIZER;
#if defined(OS_POSIX)
// Self-pipe that makes it possible to do complex shutdown handling
// outside of the signal handler.
int g_shutdown_pipe[2] = { -1, -1 };

void ShutdownPipeSignalHandler(int signal) {
  HANDLE_EINTR(write(g_shutdown_pipe[1], "q", 1));
}
void KillSpawnedTestProcesses() {
  // Keep the lock until exiting the process to prevent further processes
  // from being spawned.
  AutoLock lock(g_live_processes_lock.Get());

  fprintf(stdout,
          "Sending SIGTERM to %" PRIuS " child processes... ",
          g_live_processes.Get().size());

  for (std::map<ProcessHandle, CommandLine>::iterator i =
           g_live_processes.Get().begin();
       i != g_live_processes.Get().end();
       ++i) {
    // Send the signal to the entire process group.
    kill((-1) * (i->first), SIGTERM);
  }

  fprintf(stdout,
          "done.\nGiving processes a chance to terminate cleanly... ");

  PlatformThread::Sleep(TimeDelta::FromMilliseconds(500));

  fprintf(stdout, "done.\n");

  fprintf(stdout,
          "Sending SIGKILL to %" PRIuS " child processes... ",
          g_live_processes.Get().size());

  for (std::map<ProcessHandle, CommandLine>::iterator i =
           g_live_processes.Get().begin();
       i != g_live_processes.Get().end();
       ++i) {
    // Send the signal to the entire process group.
    kill((-1) * (i->first), SIGKILL);
  }

  fprintf(stdout, "done.\n");
}
// I/O watcher for the reading end of the self-pipe above.
// Terminates any launched child processes and exits the process.
class SignalFDWatcher : public MessageLoopForIO::Watcher {
 public:
  SignalFDWatcher() {
  }

  void OnFileCanReadWithoutBlocking(int fd) override {
    fprintf(stdout, "\nCaught signal. Killing spawned test processes...\n");

    KillSpawnedTestProcesses();

    // The signal would normally kill the process, so exit now.
    _exit(1);
  }

  void OnFileCanWriteWithoutBlocking(int fd) override { NOTREACHED(); }

 private:
  DISALLOW_COPY_AND_ASSIGN(SignalFDWatcher);
};
#endif  // defined(OS_POSIX)
// Parses the environment variable |var| as an Int32. If it is unset, returns
// true. If it is set, unsets it then converts it to Int32 before
// returning it in |result|. Returns true on success.
bool TakeInt32FromEnvironment(const char* const var, int32* result) {
  scoped_ptr<Environment> env(Environment::Create());
  std::string str_val;

  if (!env->GetVar(var, &str_val))
    return true;

  if (!env->UnSetVar(var)) {
    LOG(ERROR) << "Invalid environment: we could not unset " << var << ".\n";
    return false;
  }

  if (!StringToInt(str_val, result)) {
    LOG(ERROR) << "Invalid environment: " << var << " is not an integer.\n";
    return false;
  }

  return true;
}
// Unsets the environment variable |name| and returns true on success.
// Also returns true if the variable just doesn't exist.
bool UnsetEnvironmentVariableIfExists(const std::string& name) {
  scoped_ptr<Environment> env(Environment::Create());
  std::string str_val;

  if (!env->GetVar(name.c_str(), &str_val))
    return true;

  return env->UnSetVar(name.c_str());
}
// Returns true if bot mode has been requested, i.e. defaults optimized
// for continuous integration bots. This way developers don't have to remember
// special command-line flags.
bool BotModeEnabled() {
  scoped_ptr<Environment> env(Environment::Create());
  return CommandLine::ForCurrentProcess()->HasSwitch(
             switches::kTestLauncherBotMode) ||
         env->HasVar("CHROMIUM_TEST_LAUNCHER_BOT_MODE");
}
// Returns a command line after gtest-specific processing
// and applying |wrapper|.
CommandLine PrepareCommandLineForGTest(const CommandLine& command_line,
                                       const std::string& wrapper) {
  CommandLine new_command_line(command_line.GetProgram());
  CommandLine::SwitchMap switches = command_line.GetSwitches();

  // Strip out gtest_repeat flag - this is handled by the launcher process.
  switches.erase(kGTestRepeatFlag);

  // Don't try to write the final XML report in child processes.
  switches.erase(kGTestOutputFlag);

  for (CommandLine::SwitchMap::const_iterator iter = switches.begin();
       iter != switches.end(); ++iter) {
    new_command_line.AppendSwitchNative((*iter).first, (*iter).second);
  }

  // Prepend wrapper after the last CommandLine quasi-copy operation.
  // CommandLine does not really support removing switches well, and trying to
  // do that on a CommandLine with a wrapper is known to break.
  // TODO(phajdan.jr): Give it a try to support CommandLine removing switches.
#if defined(OS_WIN)
  new_command_line.PrependWrapper(ASCIIToUTF16(wrapper));
#elif defined(OS_POSIX)
  new_command_line.PrependWrapper(wrapper);
#endif

  return new_command_line;
}
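// Illustrative use of |wrapper| (hypothetical): passing "xvfb-run" turns the
// child invocation into "xvfb-run <test_binary> <switches...>" via
// PrependWrapper() above.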
// Launches a child process using |command_line|. If the child process is still
// running after |timeout|, it is terminated and |*was_timeout| is set to true.
// Returns the exit code of the process.
int LaunchChildTestProcessWithOptions(const CommandLine& command_line,
                                      const LaunchOptions& options,
                                      int flags,
                                      TimeDelta timeout,
                                      bool* was_timeout) {
#if defined(OS_POSIX)
  // Make sure an option we rely on is present - see LaunchChildGTestProcess.
  DCHECK(options.new_process_group);
#endif

  LaunchOptions new_options(options);

#if defined(OS_WIN)
  DCHECK(!new_options.job_handle);

  win::ScopedHandle job_handle;
  if (flags & TestLauncher::USE_JOB_OBJECTS) {
    job_handle.Set(CreateJobObject(NULL, NULL));
    if (!job_handle.IsValid()) {
      LOG(ERROR) << "Could not create JobObject.";
      return -1;
    }

    DWORD job_flags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;

    // Allow break-away from the job since the sandbox and a few other places
    // rely on it on Windows versions prior to Windows 8 (which supports
    // nested jobs).
    if (win::GetVersion() < win::VERSION_WIN8 &&
        flags & TestLauncher::ALLOW_BREAKAWAY_FROM_JOB) {
      job_flags |= JOB_OBJECT_LIMIT_BREAKAWAY_OK;
    }

    if (!SetJobObjectLimitFlags(job_handle.Get(), job_flags)) {
      LOG(ERROR) << "Could not SetJobObjectLimitFlags.";
      return -1;
    }

    new_options.job_handle = job_handle.Get();
  }
#endif  // defined(OS_WIN)

#if defined(OS_LINUX)
  // To prevent accidental privilege sharing to an untrusted child, processes
  // are started with PR_SET_NO_NEW_PRIVS. Do not set that here, since this
  // new child will be privileged and trusted.
  new_options.allow_new_privs = true;
#endif  // defined(OS_LINUX)
  Process process;

  {
    // Note how we grab the lock before the process possibly gets created.
    // This ensures that when the lock is held, ALL the processes are
    // registered in the map.
    AutoLock lock(g_live_processes_lock.Get());

    process = LaunchProcess(command_line, new_options);
    if (!process.IsValid())
      return -1;

    // TODO(rvargas) crbug.com/417532: Don't store process handles.
    g_live_processes.Get().insert(std::make_pair(process.Handle(),
                                                 command_line));
  }

  int exit_code = 0;
  if (!process.WaitForExitWithTimeout(timeout, &exit_code)) {
    *was_timeout = true;
    exit_code = -1;  // Set a non-zero exit code to signal a failure.

    // Ensure that the process terminates.
    process.Terminate(-1, true);
  }

  {
    // Note how we grab the lock before issuing a possibly broad process kill.
    // Other code parts that grab the lock kill processes, so avoid trying
    // to do that twice and trigger all kinds of log messages.
    AutoLock lock(g_live_processes_lock.Get());

#if defined(OS_POSIX)
    if (exit_code != 0) {
      // On POSIX, in case the test does not exit cleanly, either due to a
      // crash or due to it timing out, we need to clean up any child
      // processes that it might have created. On Windows, child processes
      // are automatically cleaned up using JobObjects.
      KillProcessGroup(process.Handle());
    }
#endif

    g_live_processes.Get().erase(process.Handle());
  }

  return exit_code;
}
void RunCallback(
    const TestLauncher::LaunchChildGTestProcessCallback& callback,
    int exit_code,
    const TimeDelta& elapsed_time,
    bool was_timeout,
    const std::string& output) {
  callback.Run(exit_code, elapsed_time, was_timeout, output);
}
void DoLaunchChildTestProcess(
    const CommandLine& command_line,
    TimeDelta timeout,
    int flags,
    bool redirect_stdio,
    SingleThreadTaskRunner* task_runner,
    const TestLauncher::LaunchChildGTestProcessCallback& callback) {
  TimeTicks start_time = TimeTicks::Now();

  // Redirect child process output to a file.
  FilePath output_file;
  CHECK(CreateTemporaryFile(&output_file));

  LaunchOptions options;

#if defined(OS_WIN)
  win::ScopedHandle handle;
  if (redirect_stdio) {
    // Make the file handle inheritable by the child.
    SECURITY_ATTRIBUTES sa_attr;
    sa_attr.nLength = sizeof(SECURITY_ATTRIBUTES);
    sa_attr.lpSecurityDescriptor = NULL;
    sa_attr.bInheritHandle = TRUE;

    handle.Set(CreateFile(output_file.value().c_str(),
                          GENERIC_WRITE,
                          FILE_SHARE_READ | FILE_SHARE_DELETE,
                          &sa_attr,
                          OPEN_EXISTING,
                          FILE_ATTRIBUTE_TEMPORARY,
                          NULL));
    CHECK(handle.IsValid());
    options.inherit_handles = true;
    options.stdin_handle = INVALID_HANDLE_VALUE;
    options.stdout_handle = handle.Get();
    options.stderr_handle = handle.Get();
  }
#elif defined(OS_POSIX)
  options.new_process_group = true;
#if defined(OS_LINUX)
  options.kill_on_parent_death = true;
#endif  // defined(OS_LINUX)

  FileHandleMappingVector fds_mapping;
  ScopedFD output_file_fd;

  if (redirect_stdio) {
    output_file_fd.reset(open(output_file.value().c_str(), O_RDWR));
    CHECK(output_file_fd.is_valid());

    fds_mapping.push_back(std::make_pair(output_file_fd.get(), STDOUT_FILENO));
    fds_mapping.push_back(std::make_pair(output_file_fd.get(), STDERR_FILENO));
    options.fds_to_remap = &fds_mapping;
  }
#endif
  bool was_timeout = false;
  int exit_code = LaunchChildTestProcessWithOptions(
      command_line, options, flags, timeout, &was_timeout);

  if (redirect_stdio) {
#if defined(OS_WIN)
    FlushFileBuffers(handle.Get());
#elif defined(OS_POSIX)
    output_file_fd.reset();
#endif
  }

  std::string output_file_contents;
  CHECK(ReadFileToString(output_file, &output_file_contents));
  if (!DeleteFile(output_file, false)) {
    // This needs to be non-fatal at least for Windows.
    LOG(WARNING) << "Failed to delete " << output_file.AsUTF8Unsafe();
  }

  // Run the target callback on the thread it originated from, not on
  // a worker pool thread.
  task_runner->PostTask(FROM_HERE, Bind(&RunCallback, callback, exit_code,
                                        TimeTicks::Now() - start_time,
                                        was_timeout, output_file_contents));
}
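// Note: when redirection is enabled above, the child's stdout and stderr both
// map to the same temporary file/handle, so the captured output contains both
// streams interleaved as the test produced them.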
const char kGTestFilterFlag[] = "gtest_filter";
const char kGTestHelpFlag[] = "gtest_help";
const char kGTestListTestsFlag[] = "gtest_list_tests";
const char kGTestRepeatFlag[] = "gtest_repeat";
const char kGTestRunDisabledTestsFlag[] = "gtest_also_run_disabled_tests";
const char kGTestOutputFlag[] = "gtest_output";
TestLauncherDelegate::~TestLauncherDelegate() {
}
TestLauncher::TestLauncher(TestLauncherDelegate* launcher_delegate,
                           size_t parallel_jobs)
    : launcher_delegate_(launcher_delegate),
      test_started_count_(0),
      test_finished_count_(0),
      test_success_count_(0),
      test_broken_count_(0),
      watchdog_timer_(FROM_HERE,
                      TimeDelta::FromSeconds(kOutputTimeoutSeconds),
                      this,
                      &TestLauncher::OnOutputTimeout),
      parallel_jobs_(parallel_jobs) {
}
TestLauncher::~TestLauncher() {
  if (worker_pool_owner_)
    worker_pool_owner_->pool()->Shutdown();
}
bool TestLauncher::Run() {
  if (!Init())
    return false;

  // Value of |cycles_| changes after each iteration. Keep track of the
  // original value.
  int requested_cycles = cycles_;

#if defined(OS_POSIX)
  CHECK_EQ(0, pipe(g_shutdown_pipe));

  struct sigaction action;
  memset(&action, 0, sizeof(action));
  sigemptyset(&action.sa_mask);
  action.sa_handler = &ShutdownPipeSignalHandler;

  CHECK_EQ(0, sigaction(SIGINT, &action, NULL));
  CHECK_EQ(0, sigaction(SIGQUIT, &action, NULL));
  CHECK_EQ(0, sigaction(SIGTERM, &action, NULL));

  MessageLoopForIO::FileDescriptorWatcher controller;
  SignalFDWatcher watcher;

  CHECK(MessageLoopForIO::current()->WatchFileDescriptor(
            g_shutdown_pipe[0],
            true,
            MessageLoopForIO::WATCH_READ,
            &controller,
            &watcher));
#endif  // defined(OS_POSIX)

  // Start the watchdog timer.
  watchdog_timer_.Reset();

  ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, Bind(&TestLauncher::RunTestIteration, Unretained(this)));

  MessageLoop::current()->Run();

  if (requested_cycles != 1)
    results_tracker_.PrintSummaryOfAllIterations();

  MaybeSaveSummaryAsJSON();

  return run_result_;
}
void TestLauncher::LaunchChildGTestProcess(
    const CommandLine& command_line,
    const std::string& wrapper,
    TimeDelta timeout,
    int flags,
    const LaunchChildGTestProcessCallback& callback) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Record the exact command line used to launch the child.
  CommandLine new_command_line(
      PrepareCommandLineForGTest(command_line, wrapper));

  // When running in parallel mode we need to redirect stdio to avoid mixed-up
  // output. We also always redirect on the bots to get the test output into
  // the log files.
  bool redirect_stdio = (parallel_jobs_ > 1) || BotModeEnabled();

  worker_pool_owner_->pool()->PostWorkerTask(
      FROM_HERE, Bind(&DoLaunchChildTestProcess, new_command_line, timeout,
                      flags, redirect_stdio, ThreadTaskRunnerHandle::Get(),
                      Bind(&TestLauncher::OnLaunchTestProcessFinished,
                           Unretained(this), callback)));
}
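// Note: DoLaunchChildTestProcess() runs on a worker pool thread; the result is
// posted back through the task runner captured above, so
// OnLaunchTestProcessFinished() and |callback| always run on the launcher's
// origin thread.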
void TestLauncher::OnTestFinished(const TestResult& result) {
  ++test_finished_count_;
  bool print_snippet = false;
  std::string print_test_stdio("auto");
  if (CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kTestLauncherPrintTestStdio)) {
    print_test_stdio = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTestLauncherPrintTestStdio);
  }
  if (print_test_stdio == "auto") {
    print_snippet = (result.status != TestResult::TEST_SUCCESS);
  } else if (print_test_stdio == "always") {
    print_snippet = true;
  } else if (print_test_stdio == "never") {
    print_snippet = false;
  } else {
    LOG(WARNING) << "Invalid value of " << switches::kTestLauncherPrintTestStdio
                 << ": " << print_test_stdio;
  }
  if (print_snippet) {
    std::vector<std::string> snippet_lines;
    SplitStringDontTrim(result.output_snippet, '\n', &snippet_lines);
    if (snippet_lines.size() > kOutputSnippetLinesLimit) {
      size_t truncated_size = snippet_lines.size() - kOutputSnippetLinesLimit;
      snippet_lines.erase(
          snippet_lines.begin(),
          snippet_lines.begin() + truncated_size);
      snippet_lines.insert(snippet_lines.begin(), "<truncated>");
    }
    fprintf(stdout, "%s", JoinString(snippet_lines, "\n").c_str());
  }
  if (result.status == TestResult::TEST_SUCCESS) {
    ++test_success_count_;
  } else {
    tests_to_retry_.insert(result.full_name);
  }

  results_tracker_.AddTestResult(result);
  // TODO(phajdan.jr): Align counter (padding).
  std::string status_line(
      StringPrintf("[%" PRIuS "/%" PRIuS "] %s ",
                   test_finished_count_,
                   test_started_count_,
                   result.full_name.c_str()));
  if (result.completed()) {
    status_line.append(StringPrintf("(%" PRId64 " ms)",
                                    result.elapsed_time.InMilliseconds()));
  } else if (result.status == TestResult::TEST_TIMEOUT) {
    status_line.append("(TIMED OUT)");
  } else if (result.status == TestResult::TEST_CRASH) {
    status_line.append("(CRASHED)");
  } else if (result.status == TestResult::TEST_SKIPPED) {
    status_line.append("(SKIPPED)");
  } else if (result.status == TestResult::TEST_UNKNOWN) {
    status_line.append("(UNKNOWN)");
  } else {
    // Fail very loudly so it's not ignored.
    CHECK(false) << "Unhandled test result status: " << result.status;
  }
  fprintf(stdout, "%s\n", status_line.c_str());
  // We just printed a status line, reset the watchdog timer.
  watchdog_timer_.Reset();
  // Do not waste time on timeouts. We include tests with unknown results here
  // because sometimes (e.g. hang in between unit tests) that's how a timeout
  // gets reported.
  if (result.status == TestResult::TEST_TIMEOUT ||
      result.status == TestResult::TEST_UNKNOWN) {
    test_broken_count_++;
  }

  size_t broken_threshold =
      std::max(static_cast<size_t>(20), test_started_count_ / 10);
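  // Worked example (illustrative): with 1000 started tests the threshold is
  // max(20, 100) = 100 broken tests; below 200 started tests it stays at the
  // floor of 20.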
  if (test_broken_count_ >= broken_threshold) {
    fprintf(stdout, "Too many badly broken tests (%" PRIuS "), exiting now.\n",
            test_broken_count_);

#if defined(OS_POSIX)
    KillSpawnedTestProcesses();
#endif  // defined(OS_POSIX)

    results_tracker_.AddGlobalTag("BROKEN_TEST_EARLY_EXIT");
    results_tracker_.AddGlobalTag(kUnreliableResultsTag);
    MaybeSaveSummaryAsJSON();

    exit(1);
  }
  if (test_finished_count_ != test_started_count_)
    return;

  if (tests_to_retry_.empty() || retry_count_ >= retry_limit_) {
    OnTestIterationFinished();
    return;
  }
  if (tests_to_retry_.size() >= broken_threshold) {
    fprintf(stdout,
            "Too many failing tests (%" PRIuS "), skipping retries.\n",
            tests_to_retry_.size());

    results_tracker_.AddGlobalTag("BROKEN_TEST_SKIPPED_RETRIES");
    results_tracker_.AddGlobalTag(kUnreliableResultsTag);

    OnTestIterationFinished();
    return;
  }
  retry_count_++;

  std::vector<std::string> test_names(tests_to_retry_.begin(),
                                      tests_to_retry_.end());

  tests_to_retry_.clear();

  size_t retry_started_count = launcher_delegate_->RetryTests(this, test_names);
  if (retry_started_count == 0) {
    // Signal failure, but continue to run all requested test iterations.
    // With the summary of all iterations at the end this is a good default.
    run_result_ = false;

    OnTestIterationFinished();
    return;
  }

  fprintf(stdout, "Retrying %" PRIuS " test%s (retry #%" PRIuS ")\n",
          retry_started_count,
          retry_started_count > 1 ? "s" : "",
          retry_count_);

  test_started_count_ += retry_started_count;
}
bool TestLauncher::Init() {
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  // Initialize sharding. Command line takes precedence over legacy environment
  // variables.
  if (command_line->HasSwitch(switches::kTestLauncherTotalShards) &&
      command_line->HasSwitch(switches::kTestLauncherShardIndex)) {
    if (!StringToInt(
            command_line->GetSwitchValueASCII(
                switches::kTestLauncherTotalShards),
            &total_shards_)) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherTotalShards;
      return false;
    }
    if (!StringToInt(
            command_line->GetSwitchValueASCII(
                switches::kTestLauncherShardIndex),
            &shard_index_)) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherShardIndex;
      return false;
    }
    fprintf(stdout,
            "Using sharding settings from command line. This is shard %d/%d\n",
            shard_index_, total_shards_);
  } else {
    if (!TakeInt32FromEnvironment(kTestTotalShards, &total_shards_))
      return false;
    if (!TakeInt32FromEnvironment(kTestShardIndex, &shard_index_))
      return false;
    fprintf(stdout,
            "Using sharding settings from environment. This is shard %d/%d\n",
            shard_index_, total_shards_);
  }
  if (shard_index_ < 0 ||
      shard_index_ >= total_shards_) {
    LOG(ERROR) << "Invalid sharding settings: we require 0 <= "
               << kTestShardIndex << " < " << kTestTotalShards
               << ", but you have " << kTestShardIndex << "=" << shard_index_
               << ", " << kTestTotalShards << "=" << total_shards_ << ".\n";
    return false;
  }
  // Make sure we don't pass any sharding-related environment to the child
  // processes. This test launcher implements the sharding completely.
  CHECK(UnsetEnvironmentVariableIfExists("GTEST_TOTAL_SHARDS"));
  CHECK(UnsetEnvironmentVariableIfExists("GTEST_SHARD_INDEX"));
  if (command_line->HasSwitch(kGTestRepeatFlag) &&
      !StringToInt(command_line->GetSwitchValueASCII(kGTestRepeatFlag),
                   &cycles_)) {
    LOG(ERROR) << "Invalid value for " << kGTestRepeatFlag;
    return false;
  }
  if (command_line->HasSwitch(switches::kTestLauncherRetryLimit)) {
    int retry_limit = -1;
    if (!StringToInt(command_line->GetSwitchValueASCII(
                         switches::kTestLauncherRetryLimit), &retry_limit) ||
        retry_limit < 0) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherRetryLimit;
      return false;
    }

    retry_limit_ = retry_limit;
  } else if (!command_line->HasSwitch(kGTestFilterFlag) || BotModeEnabled()) {
    // Retry failures 3 times by default if we are running all of the tests or
    // in bot mode.
    retry_limit_ = 3;
  }
  if (command_line->HasSwitch(switches::kTestLauncherJobs)) {
    int jobs = -1;
    if (!StringToInt(command_line->GetSwitchValueASCII(
                         switches::kTestLauncherJobs), &jobs) ||
        jobs < 0) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherJobs;
      return false;
    }

    parallel_jobs_ = jobs;
  } else if (command_line->HasSwitch(kGTestFilterFlag) && !BotModeEnabled()) {
    // Do not run jobs in parallel by default if we are running a subset of
    // the tests and if bot mode is off.
    parallel_jobs_ = 1;
  }

  fprintf(stdout, "Using %" PRIuS " parallel jobs.\n", parallel_jobs_);
  worker_pool_owner_.reset(
      new SequencedWorkerPoolOwner(parallel_jobs_, "test_launcher"));
  if (command_line->HasSwitch(switches::kTestLauncherFilterFile) &&
      command_line->HasSwitch(kGTestFilterFlag)) {
    LOG(ERROR) << "Only one of --test-launcher-filter-file and --gtest_filter "
               << "at a time is allowed.";
    return false;
  }
  if (command_line->HasSwitch(switches::kTestLauncherFilterFile)) {
    std::string filter;
    if (!ReadFileToString(
            command_line->GetSwitchValuePath(switches::kTestLauncherFilterFile),
            &filter)) {
      LOG(ERROR) << "Failed to read the filter file.";
      return false;
    }

    std::vector<std::string> filter_lines;
    SplitString(filter, '\n', &filter_lines);
    for (size_t i = 0; i < filter_lines.size(); i++) {
      if (filter_lines[i].empty())
        continue;

      if (filter_lines[i][0] == '-')
        negative_test_filter_.push_back(filter_lines[i].substr(1));
      else
        positive_test_filter_.push_back(filter_lines[i]);
    }
  } else {
    // Split --gtest_filter at '-', if there is one, to separate into
    // positive filter and negative filter portions.
    std::string filter = command_line->GetSwitchValueASCII(kGTestFilterFlag);
    size_t dash_pos = filter.find('-');
    if (dash_pos == std::string::npos) {
      SplitString(filter, ':', &positive_test_filter_);
    } else {
      // Everything up to the dash.
      SplitString(filter.substr(0, dash_pos), ':', &positive_test_filter_);

      // Everything after the dash.
      SplitString(filter.substr(dash_pos + 1), ':', &negative_test_filter_);
    }
  }
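  // Illustrative example (hypothetical filter):
  //   --gtest_filter=Foo.*:Bar.*-Foo.Flaky
  // produces positive patterns {"Foo.*", "Bar.*"} and the negative pattern
  // {"Foo.Flaky"}.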
  if (!launcher_delegate_->GetTests(&tests_)) {
    LOG(ERROR) << "Failed to get list of tests.";
    return false;
  }

  if (!results_tracker_.Init(*command_line)) {
    LOG(ERROR) << "Failed to initialize test results tracker.";
    return false;
  }
#if defined(NDEBUG)
  results_tracker_.AddGlobalTag("MODE_RELEASE");
#else
  results_tracker_.AddGlobalTag("MODE_DEBUG");
#endif
  // Operating systems (sorted alphabetically).
  // Note that they can deliberately overlap, e.g. OS_LINUX is a subset
  // of OS_POSIX.
#if defined(OS_ANDROID)
  results_tracker_.AddGlobalTag("OS_ANDROID");
#endif

#if defined(OS_BSD)
  results_tracker_.AddGlobalTag("OS_BSD");
#endif

#if defined(OS_FREEBSD)
  results_tracker_.AddGlobalTag("OS_FREEBSD");
#endif

#if defined(OS_IOS)
  results_tracker_.AddGlobalTag("OS_IOS");
#endif

#if defined(OS_LINUX)
  results_tracker_.AddGlobalTag("OS_LINUX");
#endif

#if defined(OS_MACOSX)
  results_tracker_.AddGlobalTag("OS_MACOSX");
#endif

#if defined(OS_NACL)
  results_tracker_.AddGlobalTag("OS_NACL");
#endif

#if defined(OS_OPENBSD)
  results_tracker_.AddGlobalTag("OS_OPENBSD");
#endif

#if defined(OS_POSIX)
  results_tracker_.AddGlobalTag("OS_POSIX");
#endif

#if defined(OS_SOLARIS)
  results_tracker_.AddGlobalTag("OS_SOLARIS");
#endif

#if defined(OS_WIN)
  results_tracker_.AddGlobalTag("OS_WIN");
#endif

#if defined(ARCH_CPU_32_BITS)
  results_tracker_.AddGlobalTag("CPU_32_BITS");
#endif

#if defined(ARCH_CPU_64_BITS)
  results_tracker_.AddGlobalTag("CPU_64_BITS");
#endif

  return true;
}
void TestLauncher::RunTests() {
  std::vector<std::string> test_names;
  for (size_t i = 0; i < tests_.size(); i++) {
    std::string test_name = FormatFullTestName(
        tests_[i].first, tests_[i].second);
    results_tracker_.AddTest(test_name);

    const CommandLine* command_line = CommandLine::ForCurrentProcess();
    if (test_name.find("DISABLED") != std::string::npos) {
      results_tracker_.AddDisabledTest(test_name);

      // Skip disabled tests unless explicitly requested.
      if (!command_line->HasSwitch(kGTestRunDisabledTestsFlag))
        continue;
    }
    if (!launcher_delegate_->ShouldRunTest(tests_[i].first, tests_[i].second))
      continue;
    // Skip the test that doesn't match the filter (if given).
    if (!positive_test_filter_.empty()) {
      bool found = false;
      for (size_t k = 0; k < positive_test_filter_.size(); ++k) {
        if (MatchPattern(test_name, positive_test_filter_[k])) {
          found = true;
          break;
        }
      }

      if (!found)
        continue;
    }
    bool excluded = false;
    for (size_t k = 0; k < negative_test_filter_.size(); ++k) {
      if (MatchPattern(test_name, negative_test_filter_[k])) {
        excluded = true;
        break;
      }
    }

    if (excluded)
      continue;
    if (Hash(test_name) % total_shards_ != static_cast<uint32>(shard_index_))
      continue;

    test_names.push_back(test_name);
  }
  test_started_count_ = launcher_delegate_->RunTests(this, test_names);

  if (test_started_count_ == 0) {
    fprintf(stdout, "0 tests run\n");

    // No tests have actually been started, so kick off the next iteration.
    ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, Bind(&TestLauncher::RunTestIteration, Unretained(this)));
  }
}
void TestLauncher::RunTestIteration() {
  if (cycles_ == 0) {
    MessageLoop::current()->Quit();
    return;
  }

  // Special value "-1" means "repeat indefinitely".
  cycles_ = (cycles_ == -1) ? cycles_ : cycles_ - 1;

  test_started_count_ = 0;
  test_finished_count_ = 0;
  test_success_count_ = 0;
  test_broken_count_ = 0;

  tests_to_retry_.clear();
  results_tracker_.OnTestIterationStarting();

  ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, Bind(&TestLauncher::RunTests, Unretained(this)));
}
void TestLauncher::MaybeSaveSummaryAsJSON() {
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  if (command_line->HasSwitch(switches::kTestLauncherSummaryOutput)) {
    FilePath summary_path(command_line->GetSwitchValuePath(
        switches::kTestLauncherSummaryOutput));
    if (!results_tracker_.SaveSummaryAsJSON(summary_path)) {
      LOG(ERROR) << "Failed to save test launcher output summary.";
    }
  }
}
void TestLauncher::OnLaunchTestProcessFinished(
    const LaunchChildGTestProcessCallback& callback,
    int exit_code,
    const TimeDelta& elapsed_time,
    bool was_timeout,
    const std::string& output) {
  DCHECK(thread_checker_.CalledOnValidThread());

  callback.Run(exit_code, elapsed_time, was_timeout, output);
}
void TestLauncher::OnTestIterationFinished() {
  TestResultsTracker::TestStatusMap tests_by_status(
      results_tracker_.GetTestStatusMapForCurrentIteration());
  if (!tests_by_status[TestResult::TEST_UNKNOWN].empty())
    results_tracker_.AddGlobalTag(kUnreliableResultsTag);

  // When we retry tests, success is determined by having nothing more
  // to retry (everything eventually passed), as opposed to having
  // no failures at all.
  if (tests_to_retry_.empty()) {
    fprintf(stdout, "SUCCESS: all tests passed.\n");
  } else {
    // Signal failure, but continue to run all requested test iterations.
    // With the summary of all iterations at the end this is a good default.
    run_result_ = false;
  }

  results_tracker_.PrintSummaryOfCurrentIteration();

  // Kick off the next iteration.
  ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, Bind(&TestLauncher::RunTestIteration, Unretained(this)));
}
void TestLauncher::OnOutputTimeout() {
  DCHECK(thread_checker_.CalledOnValidThread());

  AutoLock lock(g_live_processes_lock.Get());

  fprintf(stdout, "Still waiting for the following processes to finish:\n");

  for (std::map<ProcessHandle, CommandLine>::iterator i =
           g_live_processes.Get().begin();
       i != g_live_processes.Get().end();
       ++i) {
#if defined(OS_WIN)
    fwprintf(stdout, L"\t%s\n", i->second.GetCommandLineString().c_str());
#else
    fprintf(stdout, "\t%s\n", i->second.GetCommandLineString().c_str());
#endif
  }

  // Arm the timer again - otherwise it would fire only once.
  watchdog_timer_.Reset();
}
std::string GetTestOutputSnippet(const TestResult& result,
                                 const std::string& full_output) {
  size_t run_pos = full_output.find(std::string("[ RUN      ] ") +
                                    result.full_name);
  if (run_pos == std::string::npos)
    return std::string();

  size_t end_pos = full_output.find(std::string("[  FAILED  ] ") +
                                    result.full_name,
                                    run_pos);
  // Only clip the snippet to the "OK" message if the test really
  // succeeded. It still might have e.g. crashed after printing it.
  if (end_pos == std::string::npos &&
      result.status == TestResult::TEST_SUCCESS) {
    end_pos = full_output.find(std::string("[       OK ] ") +
                               result.full_name,
                               run_pos);
  }

  if (end_pos != std::string::npos) {
    size_t newline_pos = full_output.find("\n", end_pos);
    if (newline_pos != std::string::npos)
      end_pos = newline_pos + 1;
  }

  std::string snippet(full_output.substr(run_pos));
  if (end_pos != std::string::npos)
    snippet = full_output.substr(run_pos, end_pos - run_pos);

  return snippet;
}
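// Example of the shape of a returned snippet (illustrative):
//   [ RUN      ] FooTest.Bar
//   <everything the test wrote to stdout/stderr>
//   [       OK ] FooTest.Bar (12 ms)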