// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/test/launcher/test_launcher.h"

#include "base/at_exit.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/environment.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/format_macros.h"
#include "base/hash.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/process/kill.h"
#include "base/process/launch.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/pattern.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringize_macros.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/test/gtest_util.h"
#include "base/test/launcher/test_results_tracker.h"
#include "base/test/sequenced_worker_pool_owner.h"
#include "base/test/test_switches.h"
#include "base/test/test_timeouts.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
#endif

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#endif

// See https://groups.google.com/a/chromium.org/d/msg/chromium-dev/nkdTP7sstSc/uT3FaE_sgkAJ .

// The environment variable name for the total number of test shards.
const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";

// The environment variable name for the test shard index.
const char kTestShardIndex[] = "GTEST_SHARD_INDEX";

// Global tag for test runs where the results are incomplete or unreliable
// for any reason, e.g. early exit because of too many broken tests.
const char kUnreliableResultsTag[] = "UNRELIABLE_RESULTS";

// Maximum time of no output after which we print the list of processes still
// running. This deliberately doesn't use TestTimeouts (which is otherwise
// a recommended solution), because those timeouts can be increased, which
// would defeat the purpose of this one: 1) avoid the buildbot "no output for
// X seconds" timeout killing the process, 2) communicate the status of the
// test launcher to people looking at the output (no output for a long time
// is mysterious and gives no info about what is happening), and 3) help
// debugging in case the process hangs anyway.
const int kOutputTimeoutSeconds = 15;

// Limit of output snippet lines when printing to stdout.
// Avoids flooding the logs with an amount of output that gums up
// the infrastructure.
const size_t kOutputSnippetLinesLimit = 5000;

// Set of live launched test processes with corresponding lock (callers are
// allowed to launch processes on different threads).
LazyInstance<std::map<ProcessHandle, CommandLine> > g_live_processes
    = LAZY_INSTANCE_INITIALIZER;
LazyInstance<Lock> g_live_processes_lock = LAZY_INSTANCE_INITIALIZER;

#if defined(OS_POSIX)
// Self-pipe that makes it possible to do complex shutdown handling
// outside of the signal handler.
int g_shutdown_pipe[2] = { -1, -1 };

void ShutdownPipeSignalHandler(int signal) {
  HANDLE_EINTR(write(g_shutdown_pipe[1], "q", 1));
}

void KillSpawnedTestProcesses() {
  // Keep the lock until exiting the process to prevent further processes
  // from being spawned.
  AutoLock lock(g_live_processes_lock.Get());

  fprintf(stdout,
          "Sending SIGTERM to %" PRIuS " child processes... ",
          g_live_processes.Get().size());

  for (std::map<ProcessHandle, CommandLine>::iterator i =
           g_live_processes.Get().begin();
       i != g_live_processes.Get().end();
       ++i) {
    // Send the signal to entire process group.
    kill((-1) * (i->first), SIGTERM);
  }

  fprintf(stdout,
          "done.\nGiving processes a chance to terminate cleanly... ");
  PlatformThread::Sleep(TimeDelta::FromMilliseconds(500));

  fprintf(stdout, "done.\n");
127 "Sending SIGKILL to %" PRIuS
" child processes... ",
128 g_live_processes
.Get().size());
131 for (std::map
<ProcessHandle
, CommandLine
>::iterator i
=
132 g_live_processes
.Get().begin();
133 i
!= g_live_processes
.Get().end();
135 // Send the signal to entire process group.
136 kill((-1) * (i
->first
), SIGKILL
);
139 fprintf(stdout
, "done.\n");

// I/O watcher for the reading end of the self-pipe above.
// Terminates any launched child processes and exits the process.
class SignalFDWatcher : public MessageLoopForIO::Watcher {
 public:
  void OnFileCanReadWithoutBlocking(int fd) override {
    fprintf(stdout, "\nCaught signal. Killing spawned test processes...\n");

    KillSpawnedTestProcesses();

    // The signal would normally kill the process, so exit now.
    _exit(1);
  }

  void OnFileCanWriteWithoutBlocking(int fd) override { NOTREACHED(); }

 private:
  DISALLOW_COPY_AND_ASSIGN(SignalFDWatcher);
};

#endif  // defined(OS_POSIX)

// Parses the environment variable |var| as an Int32. If it is unset, returns
// true. If it is set, unsets it then converts it to Int32 before
// returning it in |result|. Returns true on success.
bool TakeInt32FromEnvironment(const char* const var, int32* result) {
  scoped_ptr<Environment> env(Environment::Create());
  std::string str_val;

  if (!env->GetVar(var, &str_val))
    return true;

  if (!env->UnSetVar(var)) {
    LOG(ERROR) << "Invalid environment: we could not unset " << var << ".\n";
    return false;
  }

  if (!StringToInt(str_val, result)) {
    LOG(ERROR) << "Invalid environment: " << var << " is not an integer.\n";
    return false;
  }

  return true;
}

// Unsets the environment variable |name| and returns true on success.
// Also returns true if the variable just doesn't exist.
bool UnsetEnvironmentVariableIfExists(const std::string& name) {
  scoped_ptr<Environment> env(Environment::Create());
  std::string str_val;

  if (!env->GetVar(name.c_str(), &str_val))
    return true;

  return env->UnSetVar(name.c_str());
}

// Returns true if bot mode has been requested, i.e. defaults optimized
// for continuous integration bots. This way developers don't have to remember
// special command-line flags.
bool BotModeEnabled() {
  scoped_ptr<Environment> env(Environment::Create());
  return CommandLine::ForCurrentProcess()->HasSwitch(
             switches::kTestLauncherBotMode) ||
         env->HasVar("CHROMIUM_TEST_LAUNCHER_BOT_MODE");
}

// Returns the command line after gtest-specific processing and applying
// |wrapper|.
CommandLine PrepareCommandLineForGTest(const CommandLine& command_line,
                                       const std::string& wrapper) {
  CommandLine new_command_line(command_line.GetProgram());
  CommandLine::SwitchMap switches = command_line.GetSwitches();

  // Strip out gtest_repeat flag - this is handled by the launcher process.
  switches.erase(kGTestRepeatFlag);

  // Don't try to write the final XML report in child processes.
  switches.erase(kGTestOutputFlag);

  for (CommandLine::SwitchMap::const_iterator iter = switches.begin();
       iter != switches.end(); ++iter) {
    new_command_line.AppendSwitchNative((*iter).first, (*iter).second);
  }

  // Prepend wrapper after last CommandLine quasi-copy operation. CommandLine
  // does not really support removing switches well, and trying to do that
  // on a CommandLine with a wrapper is known to break.
  // TODO(phajdan.jr): Give it a try to support CommandLine removing switches.
#if defined(OS_WIN)
  new_command_line.PrependWrapper(ASCIIToUTF16(wrapper));
#elif defined(OS_POSIX)
  new_command_line.PrependWrapper(wrapper);
#endif

  return new_command_line;
}

// Launches a child process using |command_line|. If the child process is still
// running after |timeout|, it is terminated and |*was_timeout| is set to true.
// Returns the exit code of the process.
int LaunchChildTestProcessWithOptions(const CommandLine& command_line,
                                      const LaunchOptions& options,
                                      int flags,
                                      TimeDelta timeout,
                                      bool* was_timeout) {
#if defined(OS_POSIX)
  // Make sure an option we rely on is present - see LaunchChildGTestProcess.
  DCHECK(options.new_process_group);
#endif

  LaunchOptions new_options(options);

#if defined(OS_WIN)
  DCHECK(!new_options.job_handle);

  win::ScopedHandle job_handle;
  if (flags & TestLauncher::USE_JOB_OBJECTS) {
    job_handle.Set(CreateJobObject(NULL, NULL));
    if (!job_handle.IsValid()) {
      LOG(ERROR) << "Could not create JobObject.";
      return -1;
    }

    DWORD job_flags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE;

    // Allow break-away from the job since the sandbox and a few other places
    // rely on it on Windows versions prior to Windows 8 (which supports
    // nested jobs).
    if (win::GetVersion() < win::VERSION_WIN8 &&
        flags & TestLauncher::ALLOW_BREAKAWAY_FROM_JOB) {
      job_flags |= JOB_OBJECT_LIMIT_BREAKAWAY_OK;
    }

    if (!SetJobObjectLimitFlags(job_handle.Get(), job_flags)) {
      LOG(ERROR) << "Could not SetJobObjectLimitFlags.";
      return -1;
    }
    new_options.job_handle = job_handle.Get();
  }
#endif  // defined(OS_WIN)

#if defined(OS_LINUX)
  // To prevent accidental privilege sharing to an untrusted child, processes
  // are started with PR_SET_NO_NEW_PRIVS. Do not set that here, since this
  // new child will be privileged and trusted.
  new_options.allow_new_privs = true;
#endif  // defined(OS_LINUX)

  Process process;

  {
    // Note how we grab the lock before the process possibly gets created.
    // This ensures that when the lock is held, ALL the processes are
    // registered in the set.
    AutoLock lock(g_live_processes_lock.Get());

    process = LaunchProcess(command_line, new_options);
    if (!process.IsValid())
      return -1;

    // TODO(rvargas) crbug.com/417532: Don't store process handles.
    g_live_processes.Get().insert(std::make_pair(process.Handle(),
                                                 command_line));
  }

  int exit_code = 0;
  if (!process.WaitForExitWithTimeout(timeout, &exit_code)) {
    *was_timeout = true;
    exit_code = -1;  // Set a non-zero exit code to signal a failure.

    // Ensure that the process terminates.
    process.Terminate(-1, true);
  }

  {
    // Note how we grab the lock before issuing a possibly broad process kill.
    // Other code parts that grab the lock kill processes, so avoid trying
    // to do that twice and trigger all kinds of log messages.
    AutoLock lock(g_live_processes_lock.Get());

#if defined(OS_POSIX)
    if (exit_code != 0) {
      // On POSIX, in case the test does not exit cleanly, either due to a
      // crash or due to it timing out, we need to clean up any child
      // processes that it might have created. On Windows, child processes
      // are automatically cleaned up using JobObjects.
      KillProcessGroup(process.Handle());
    }
#endif

    g_live_processes.Get().erase(process.Handle());
  }

  return exit_code;
}

void RunCallback(
    const TestLauncher::LaunchChildGTestProcessCallback& callback,
    int exit_code,
    const TimeDelta& elapsed_time,
    bool was_timeout,
    const std::string& output) {
  callback.Run(exit_code, elapsed_time, was_timeout, output);
}

void DoLaunchChildTestProcess(
    const CommandLine& command_line,
    TimeDelta timeout,
    int flags,
    bool redirect_stdio,
    SingleThreadTaskRunner* task_runner,
    const TestLauncher::LaunchChildGTestProcessCallback& callback) {
  TimeTicks start_time = TimeTicks::Now();

  // Redirect child process output to a file.
  FilePath output_file;
  CHECK(CreateTemporaryFile(&output_file));

  LaunchOptions options;

#if defined(OS_WIN)
  win::ScopedHandle handle;

  if (redirect_stdio) {
    // Make the file handle inheritable by the child.
    SECURITY_ATTRIBUTES sa_attr;
    sa_attr.nLength = sizeof(SECURITY_ATTRIBUTES);
    sa_attr.lpSecurityDescriptor = NULL;
    sa_attr.bInheritHandle = TRUE;

    handle.Set(CreateFile(output_file.value().c_str(),
                          GENERIC_WRITE,
                          FILE_SHARE_READ | FILE_SHARE_DELETE,
                          &sa_attr,
                          OPEN_EXISTING,
                          FILE_ATTRIBUTE_TEMPORARY,
                          NULL));
    CHECK(handle.IsValid());
    options.inherit_handles = true;
    options.stdin_handle = INVALID_HANDLE_VALUE;
    options.stdout_handle = handle.Get();
    options.stderr_handle = handle.Get();
  }
#elif defined(OS_POSIX)
  options.new_process_group = true;
#if defined(OS_LINUX)
  options.kill_on_parent_death = true;
#endif  // defined(OS_LINUX)

  FileHandleMappingVector fds_mapping;
  ScopedFD output_file_fd;

  if (redirect_stdio) {
    output_file_fd.reset(open(output_file.value().c_str(), O_RDWR));
    CHECK(output_file_fd.is_valid());
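
    // Both stdout and stderr of the child are mapped to the same descriptor,
    // so all of its output lands in one file in the order it was written.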
    fds_mapping.push_back(std::make_pair(output_file_fd.get(), STDOUT_FILENO));
    fds_mapping.push_back(std::make_pair(output_file_fd.get(), STDERR_FILENO));
    options.fds_to_remap = &fds_mapping;
  }
#endif

  bool was_timeout = false;
  int exit_code = LaunchChildTestProcessWithOptions(
      command_line, options, flags, timeout, &was_timeout);

  if (redirect_stdio) {
#if defined(OS_WIN)
    FlushFileBuffers(handle.Get());
#elif defined(OS_POSIX)
    output_file_fd.reset();
#endif
  }

  std::string output_file_contents;
  CHECK(ReadFileToString(output_file, &output_file_contents));

  if (!DeleteFile(output_file, false)) {
    // This needs to be non-fatal at least for Windows.
    LOG(WARNING) << "Failed to delete " << output_file.AsUTF8Unsafe();
  }

  // Run the target callback on the thread it originated from, not on
  // a worker pool thread.
  task_runner->PostTask(FROM_HERE, Bind(&RunCallback, callback, exit_code,
                                        TimeTicks::Now() - start_time,
                                        was_timeout, output_file_contents));
}

const char kGTestFilterFlag[] = "gtest_filter";
const char kGTestHelpFlag[] = "gtest_help";
const char kGTestListTestsFlag[] = "gtest_list_tests";
const char kGTestRepeatFlag[] = "gtest_repeat";
const char kGTestRunDisabledTestsFlag[] = "gtest_also_run_disabled_tests";
const char kGTestOutputFlag[] = "gtest_output";

TestLauncherDelegate::~TestLauncherDelegate() {
}

TestLauncher::TestLauncher(TestLauncherDelegate* launcher_delegate,
                           size_t parallel_jobs)
    : launcher_delegate_(launcher_delegate),
      test_started_count_(0),
      test_finished_count_(0),
      test_success_count_(0),
      test_broken_count_(0),
      force_run_broken_tests_(false),
      watchdog_timer_(FROM_HERE,
                      TimeDelta::FromSeconds(kOutputTimeoutSeconds),
                      this,
                      &TestLauncher::OnOutputTimeout),
      parallel_jobs_(parallel_jobs) {
}

TestLauncher::~TestLauncher() {
  if (worker_pool_owner_)
    worker_pool_owner_->pool()->Shutdown();
}

bool TestLauncher::Run() {
  if (!Init())
    return false;

  // Value of |cycles_| changes after each iteration. Keep track of the
  // original value.
  int requested_cycles = cycles_;

#if defined(OS_POSIX)
  CHECK_EQ(0, pipe(g_shutdown_pipe));

  struct sigaction action;
  memset(&action, 0, sizeof(action));
  sigemptyset(&action.sa_mask);
  action.sa_handler = &ShutdownPipeSignalHandler;

  CHECK_EQ(0, sigaction(SIGINT, &action, NULL));
  CHECK_EQ(0, sigaction(SIGQUIT, &action, NULL));
  CHECK_EQ(0, sigaction(SIGTERM, &action, NULL));

  MessageLoopForIO::FileDescriptorWatcher controller;
  SignalFDWatcher watcher;

  CHECK(MessageLoopForIO::current()->WatchFileDescriptor(
            g_shutdown_pipe[0],
            true,
            MessageLoopForIO::WATCH_READ,
            &controller,
            &watcher));
#endif  // defined(OS_POSIX)

  // Start the watchdog timer.
  watchdog_timer_.Reset();

  ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, Bind(&TestLauncher::RunTestIteration, Unretained(this)));
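
  // Everything from here on is driven by tasks posted to the message loop;
  // the Run() call below only returns after the final iteration quits the
  // loop (see RunTestIteration()).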
  MessageLoop::current()->Run();

  if (requested_cycles != 1)
    results_tracker_.PrintSummaryOfAllIterations();

  MaybeSaveSummaryAsJSON();

  return run_result_;
}

void TestLauncher::LaunchChildGTestProcess(
    const CommandLine& command_line,
    const std::string& wrapper,
    TimeDelta timeout,
    int flags,
    const LaunchChildGTestProcessCallback& callback) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Record the exact command line used to launch the child.
  CommandLine new_command_line(
      PrepareCommandLineForGTest(command_line, wrapper));

  // When running in parallel mode we need to redirect stdio to avoid mixed-up
  // output. We also always redirect on the bots to get the test output into
  // the log files.
  bool redirect_stdio = (parallel_jobs_ > 1) || BotModeEnabled();
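
  // The actual launch (and the blocking wait for the child to exit) happens
  // on a worker pool thread; the result is bounced back to this thread via
  // OnLaunchTestProcessFinished().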
  worker_pool_owner_->pool()->PostWorkerTask(
      FROM_HERE, Bind(&DoLaunchChildTestProcess, new_command_line, timeout,
                      flags, redirect_stdio, ThreadTaskRunnerHandle::Get(),
                      Bind(&TestLauncher::OnLaunchTestProcessFinished,
                           Unretained(this), callback)));
}

void TestLauncher::OnTestFinished(const TestResult& result) {
  ++test_finished_count_;

  bool print_snippet = false;
  std::string print_test_stdio("auto");
  if (CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kTestLauncherPrintTestStdio)) {
    print_test_stdio = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTestLauncherPrintTestStdio);
  }
  if (print_test_stdio == "auto") {
    print_snippet = (result.status != TestResult::TEST_SUCCESS);
  } else if (print_test_stdio == "always") {
    print_snippet = true;
  } else if (print_test_stdio == "never") {
    print_snippet = false;
  } else {
    LOG(WARNING) << "Invalid value of " << switches::kTestLauncherPrintTestStdio
                 << ": " << print_test_stdio;
  }

  std::vector<std::string> snippet_lines = SplitString(
      result.output_snippet, "\n", base::KEEP_WHITESPACE,
      base::SPLIT_WANT_ALL);
  if (snippet_lines.size() > kOutputSnippetLinesLimit) {
    size_t truncated_size = snippet_lines.size() - kOutputSnippetLinesLimit;
    snippet_lines.erase(
        snippet_lines.begin(),
        snippet_lines.begin() + truncated_size);
    snippet_lines.insert(snippet_lines.begin(), "<truncated>");
  }

  if (print_snippet) {
    fprintf(stdout, "%s", base::JoinString(snippet_lines, "\n").c_str());
  }

  if (result.status == TestResult::TEST_SUCCESS) {
    ++test_success_count_;
  } else {
    tests_to_retry_.insert(result.full_name);
  }

  results_tracker_.AddTestResult(result);

  // TODO(phajdan.jr): Align counter (padding).
  std::string status_line(
      StringPrintf("[%" PRIuS "/%" PRIuS "] %s ",
                   test_finished_count_,
                   test_started_count_,
                   result.full_name.c_str()));
  if (result.completed()) {
    status_line.append(StringPrintf("(%" PRId64 " ms)",
                                    result.elapsed_time.InMilliseconds()));
  } else if (result.status == TestResult::TEST_TIMEOUT) {
    status_line.append("(TIMED OUT)");
  } else if (result.status == TestResult::TEST_CRASH) {
    status_line.append("(CRASHED)");
  } else if (result.status == TestResult::TEST_SKIPPED) {
    status_line.append("(SKIPPED)");
  } else if (result.status == TestResult::TEST_UNKNOWN) {
    status_line.append("(UNKNOWN)");
  } else {
    // Fail very loudly so it's not ignored.
    CHECK(false) << "Unhandled test result status: " << result.status;
  }
  fprintf(stdout, "%s\n", status_line.c_str());

  // We just printed a status line, reset the watchdog timer.
  watchdog_timer_.Reset();

  // Do not waste time on timeouts. We include tests with unknown results here
  // because sometimes (e.g. hang in between unit tests) that's how a timeout
  // gets reported.
  if (result.status == TestResult::TEST_TIMEOUT ||
      result.status == TestResult::TEST_UNKNOWN) {
    test_broken_count_++;
  }
  size_t broken_threshold =
      std::max(static_cast<size_t>(20), test_started_count_ / 10);
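
  // The run is treated as hopelessly broken once at least 20 tests, or 10%
  // of the tests started in this iteration (whichever is larger), time out
  // or report an unknown result.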
  if (!force_run_broken_tests_ && test_broken_count_ >= broken_threshold) {
    fprintf(stdout, "Too many badly broken tests (%" PRIuS "), exiting now.\n",
            test_broken_count_);

#if defined(OS_POSIX)
    KillSpawnedTestProcesses();
#endif  // defined(OS_POSIX)

    results_tracker_.AddGlobalTag("BROKEN_TEST_EARLY_EXIT");
    results_tracker_.AddGlobalTag(kUnreliableResultsTag);
    MaybeSaveSummaryAsJSON();

    exit(1);
  }

  if (test_finished_count_ != test_started_count_)
    return;

  if (tests_to_retry_.empty() || retry_count_ >= retry_limit_) {
    OnTestIterationFinished();
    return;
  }

  if (!force_run_broken_tests_ && tests_to_retry_.size() >= broken_threshold) {
    fprintf(stdout,
            "Too many failing tests (%" PRIuS "), skipping retries.\n",
            tests_to_retry_.size());

    results_tracker_.AddGlobalTag("BROKEN_TEST_SKIPPED_RETRIES");
    results_tracker_.AddGlobalTag(kUnreliableResultsTag);

    OnTestIterationFinished();
    return;
  }

  retry_count_++;

  std::vector<std::string> test_names(tests_to_retry_.begin(),
                                      tests_to_retry_.end());

  tests_to_retry_.clear();

  size_t retry_started_count = launcher_delegate_->RetryTests(this, test_names);
  if (retry_started_count == 0) {
    // Signal failure, but continue to run all requested test iterations.
    // With the summary of all iterations at the end this is a good default.
    run_result_ = false;

    OnTestIterationFinished();
    return;
  }

  fprintf(stdout, "Retrying %" PRIuS " test%s (retry #%" PRIuS ")\n",
          retry_started_count,
          retry_started_count > 1 ? "s" : "",
          retry_count_);

  test_started_count_ += retry_started_count;
}

bool TestLauncher::Init() {
  const CommandLine* command_line = CommandLine::ForCurrentProcess();

  // Initialize sharding. Command line takes precedence over legacy environment
  // variables.
  if (command_line->HasSwitch(switches::kTestLauncherTotalShards) &&
      command_line->HasSwitch(switches::kTestLauncherShardIndex)) {
    if (!StringToInt(
            command_line->GetSwitchValueASCII(
                switches::kTestLauncherTotalShards),
            &total_shards_)) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherTotalShards;
      return false;
    }
    if (!StringToInt(
            command_line->GetSwitchValueASCII(
                switches::kTestLauncherShardIndex),
            &shard_index_)) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherShardIndex;
      return false;
    }
    fprintf(stdout,
            "Using sharding settings from command line. This is shard %d/%d\n",
            shard_index_, total_shards_);
  } else {
    if (!TakeInt32FromEnvironment(kTestTotalShards, &total_shards_))
      return false;
    if (!TakeInt32FromEnvironment(kTestShardIndex, &shard_index_))
      return false;
    fprintf(stdout,
            "Using sharding settings from environment. This is shard %d/%d\n",
            shard_index_, total_shards_);
  }

  if (shard_index_ < 0 ||
      total_shards_ < 0 ||
      shard_index_ >= total_shards_) {
    LOG(ERROR) << "Invalid sharding settings: we require 0 <= "
               << kTestShardIndex << " < " << kTestTotalShards
               << ", but you have " << kTestShardIndex << "=" << shard_index_
               << ", " << kTestTotalShards << "=" << total_shards_ << ".\n";
    return false;
  }

  // Make sure we don't pass any sharding-related environment to the child
  // processes. This test launcher implements the sharding completely.
  CHECK(UnsetEnvironmentVariableIfExists("GTEST_TOTAL_SHARDS"));
  CHECK(UnsetEnvironmentVariableIfExists("GTEST_SHARD_INDEX"));

  if (command_line->HasSwitch(kGTestRepeatFlag) &&
      !StringToInt(command_line->GetSwitchValueASCII(kGTestRepeatFlag),
                   &cycles_)) {
    LOG(ERROR) << "Invalid value for " << kGTestRepeatFlag;
    return false;
  }

  if (command_line->HasSwitch(switches::kTestLauncherRetryLimit)) {
    int retry_limit = -1;
    if (!StringToInt(command_line->GetSwitchValueASCII(
            switches::kTestLauncherRetryLimit), &retry_limit) ||
        retry_limit < 0) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherRetryLimit;
      return false;
    }

    retry_limit_ = retry_limit;
  } else if (!command_line->HasSwitch(kGTestFilterFlag) || BotModeEnabled()) {
    // Retry failures 3 times by default if we are running all of the tests or
    // in bot mode.
    retry_limit_ = 3;
  }

  if (command_line->HasSwitch(switches::kTestLauncherForceRunBrokenTests))
    force_run_broken_tests_ = true;

  if (command_line->HasSwitch(switches::kTestLauncherJobs)) {
    int jobs = -1;
    if (!StringToInt(command_line->GetSwitchValueASCII(
            switches::kTestLauncherJobs), &jobs) ||
        jobs < 0) {
      LOG(ERROR) << "Invalid value for " << switches::kTestLauncherJobs;
      return false;
    }

    parallel_jobs_ = jobs;
  } else if (command_line->HasSwitch(kGTestFilterFlag) && !BotModeEnabled()) {
    // Do not run jobs in parallel by default if we are running a subset of
    // the tests and if bot mode is off.
    parallel_jobs_ = 1;
  }

  fprintf(stdout, "Using %" PRIuS " parallel jobs.\n", parallel_jobs_);
  worker_pool_owner_.reset(
      new SequencedWorkerPoolOwner(parallel_jobs_, "test_launcher"));

  if (command_line->HasSwitch(switches::kTestLauncherFilterFile) &&
      command_line->HasSwitch(kGTestFilterFlag)) {
    LOG(ERROR) << "Only one of --test-launcher-filter-file and --gtest_filter "
               << "at a time is allowed.";
    return false;
  }

  if (command_line->HasSwitch(switches::kTestLauncherFilterFile)) {
    std::string filter;
    if (!ReadFileToString(
            command_line->GetSwitchValuePath(switches::kTestLauncherFilterFile),
            &filter)) {
      LOG(ERROR) << "Failed to read the filter file.";
      return false;
    }

    std::vector<std::string> filter_lines = SplitString(
        filter, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
    for (size_t i = 0; i < filter_lines.size(); i++) {
      if (filter_lines[i].empty())
        continue;

      if (filter_lines[i][0] == '-')
        negative_test_filter_.push_back(filter_lines[i].substr(1));
      else
        positive_test_filter_.push_back(filter_lines[i]);
    }
  } else {
    // Split --gtest_filter at '-', if there is one, to separate into
    // positive filter and negative filter portions.
    std::string filter = command_line->GetSwitchValueASCII(kGTestFilterFlag);
    size_t dash_pos = filter.find('-');
    if (dash_pos == std::string::npos) {
      positive_test_filter_ = SplitString(
          filter, ":", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
    } else {
      // Everything up to the dash.
      positive_test_filter_ = SplitString(
          filter.substr(0, dash_pos), ":", base::TRIM_WHITESPACE,
          base::SPLIT_WANT_ALL);

      // Everything after the dash.
      negative_test_filter_ = SplitString(
          filter.substr(dash_pos + 1), ":", base::TRIM_WHITESPACE,
          base::SPLIT_WANT_ALL);
    }
  }

  if (!launcher_delegate_->GetTests(&tests_)) {
    LOG(ERROR) << "Failed to get list of tests.";
    return false;
  }

  if (!results_tracker_.Init(*command_line)) {
    LOG(ERROR) << "Failed to initialize test results tracker.";
    return false;
  }

#if defined(NDEBUG)
  results_tracker_.AddGlobalTag("MODE_RELEASE");
#else
  results_tracker_.AddGlobalTag("MODE_DEBUG");
#endif

  // Operating systems (sorted alphabetically).
  // Note that they can deliberately overlap, e.g. OS_LINUX is a subset
  // of OS_POSIX.
#if defined(OS_ANDROID)
  results_tracker_.AddGlobalTag("OS_ANDROID");
#endif

#if defined(OS_BSD)
  results_tracker_.AddGlobalTag("OS_BSD");
#endif

#if defined(OS_FREEBSD)
  results_tracker_.AddGlobalTag("OS_FREEBSD");
#endif

#if defined(OS_IOS)
  results_tracker_.AddGlobalTag("OS_IOS");
#endif

#if defined(OS_LINUX)
  results_tracker_.AddGlobalTag("OS_LINUX");
#endif

#if defined(OS_MACOSX)
  results_tracker_.AddGlobalTag("OS_MACOSX");
#endif

#if defined(OS_NACL)
  results_tracker_.AddGlobalTag("OS_NACL");
#endif

#if defined(OS_OPENBSD)
  results_tracker_.AddGlobalTag("OS_OPENBSD");
#endif

#if defined(OS_POSIX)
  results_tracker_.AddGlobalTag("OS_POSIX");
#endif

#if defined(OS_SOLARIS)
  results_tracker_.AddGlobalTag("OS_SOLARIS");
#endif

#if defined(OS_WIN)
  results_tracker_.AddGlobalTag("OS_WIN");
#endif

#if defined(ARCH_CPU_32_BITS)
  results_tracker_.AddGlobalTag("CPU_32_BITS");
#endif

#if defined(ARCH_CPU_64_BITS)
  results_tracker_.AddGlobalTag("CPU_64_BITS");
#endif

  return true;
}

void TestLauncher::RunTests() {
  std::vector<std::string> test_names;
  for (size_t i = 0; i < tests_.size(); i++) {
    std::string test_name = FormatFullTestName(
        tests_[i].test_case_name, tests_[i].test_name);

    results_tracker_.AddTest(test_name, tests_[i].file, tests_[i].line);

    const CommandLine* command_line = CommandLine::ForCurrentProcess();
    if (test_name.find("DISABLED") != std::string::npos) {
      results_tracker_.AddDisabledTest(test_name);

      // Skip disabled tests unless explicitly requested.
      if (!command_line->HasSwitch(kGTestRunDisabledTestsFlag))
        continue;
    }

    if (!launcher_delegate_->ShouldRunTest(
            tests_[i].test_case_name, tests_[i].test_name)) {
      continue;
    }

    // Skip the test that doesn't match the filter (if given).
    if (!positive_test_filter_.empty()) {
      bool found = false;
      for (size_t k = 0; k < positive_test_filter_.size(); ++k) {
        if (MatchPattern(test_name, positive_test_filter_[k])) {
          found = true;
          break;
        }
      }

      if (!found)
        continue;
    }

    bool excluded = false;
    for (size_t k = 0; k < negative_test_filter_.size(); ++k) {
      if (MatchPattern(test_name, negative_test_filter_[k])) {
        excluded = true;
        break;
      }
    }
    if (excluded)
      continue;
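
    // Sharding: a test belongs to the shard its full name hashes to, so each
    // shard deterministically picks a disjoint subset of the suite without
    // any coordination between shards.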
    if (Hash(test_name) % total_shards_ != static_cast<uint32>(shard_index_))
      continue;

    test_names.push_back(test_name);
  }

  test_started_count_ = launcher_delegate_->RunTests(this, test_names);

  if (test_started_count_ == 0) {
    fprintf(stdout, "0 tests run\n");

    // No tests have actually been started, so kick off the next iteration.
    ThreadTaskRunnerHandle::Get()->PostTask(
        FROM_HERE, Bind(&TestLauncher::RunTestIteration, Unretained(this)));
  }
}

void TestLauncher::RunTestIteration() {
  if (cycles_ == 0) {
    MessageLoop::current()->Quit();
    return;
  }

  // Special value "-1" means "repeat indefinitely".
  cycles_ = (cycles_ == -1) ? cycles_ : cycles_ - 1;

  test_started_count_ = 0;
  test_finished_count_ = 0;
  test_success_count_ = 0;
  test_broken_count_ = 0;

  tests_to_retry_.clear();
  results_tracker_.OnTestIterationStarting();

  ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, Bind(&TestLauncher::RunTests, Unretained(this)));
}

void TestLauncher::MaybeSaveSummaryAsJSON() {
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  if (command_line->HasSwitch(switches::kTestLauncherSummaryOutput)) {
    FilePath summary_path(command_line->GetSwitchValuePath(
        switches::kTestLauncherSummaryOutput));
    if (!results_tracker_.SaveSummaryAsJSON(summary_path)) {
      LOG(ERROR) << "Failed to save test launcher output summary.";
    }
  }
}

void TestLauncher::OnLaunchTestProcessFinished(
    const LaunchChildGTestProcessCallback& callback,
    int exit_code,
    const TimeDelta& elapsed_time,
    bool was_timeout,
    const std::string& output) {
  DCHECK(thread_checker_.CalledOnValidThread());

  callback.Run(exit_code, elapsed_time, was_timeout, output);
}

void TestLauncher::OnTestIterationFinished() {
  TestResultsTracker::TestStatusMap tests_by_status(
      results_tracker_.GetTestStatusMapForCurrentIteration());
  if (!tests_by_status[TestResult::TEST_UNKNOWN].empty())
    results_tracker_.AddGlobalTag(kUnreliableResultsTag);

  // When we retry tests, success is determined by having nothing more
  // to retry (everything eventually passed), as opposed to having
  // no failures at all.
  if (tests_to_retry_.empty()) {
    fprintf(stdout, "SUCCESS: all tests passed.\n");
  } else {
    // Signal failure, but continue to run all requested test iterations.
    // With the summary of all iterations at the end this is a good default.
    run_result_ = false;
  }

  results_tracker_.PrintSummaryOfCurrentIteration();

  // Kick off the next iteration.
  ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, Bind(&TestLauncher::RunTestIteration, Unretained(this)));
}

void TestLauncher::OnOutputTimeout() {
  DCHECK(thread_checker_.CalledOnValidThread());

  AutoLock lock(g_live_processes_lock.Get());

  fprintf(stdout, "Still waiting for the following processes to finish:\n");

  for (std::map<ProcessHandle, CommandLine>::iterator i =
           g_live_processes.Get().begin();
       i != g_live_processes.Get().end();
       ++i) {
#if defined(OS_WIN)
    fwprintf(stdout, L"\t%s\n", i->second.GetCommandLineString().c_str());
#else
    fprintf(stdout, "\t%s\n", i->second.GetCommandLineString().c_str());
#endif
  }

  // Arm the timer again - otherwise it would fire only once.
  watchdog_timer_.Reset();
}

std::string GetTestOutputSnippet(const TestResult& result,
                                 const std::string& full_output) {
  size_t run_pos = full_output.find(std::string("[ RUN      ] ") +
                                    result.full_name);
  if (run_pos == std::string::npos)
    return std::string();

  size_t end_pos = full_output.find(std::string("[  FAILED  ] ") +
                                    result.full_name,
                                    run_pos);
  // Only clip the snippet to the "OK" message if the test really
  // succeeded. It still might have e.g. crashed after printing it.
  if (end_pos == std::string::npos &&
      result.status == TestResult::TEST_SUCCESS) {
    end_pos = full_output.find(std::string("[       OK ] ") +
                               result.full_name,
                               run_pos);
  }
!= std::string::npos
) {
1080 size_t newline_pos
= full_output
.find("\n", end_pos
);
1081 if (newline_pos
!= std::string::npos
)
1082 end_pos
= newline_pos
+ 1;

  std::string snippet(full_output.substr(run_pos));
  if (end_pos != std::string::npos)
    snippet = full_output.substr(run_pos, end_pos - run_pos);

  return snippet;
}