// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop/message_loop.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_pump_default.h"
#include "base/metrics/histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_local.h"
#include "base/time/time.h"
#include "base/tracked_objects.h"

#if defined(OS_MACOSX)
#include "base/message_loop/message_pump_mac.h"
#endif
#if defined(OS_POSIX) && !defined(OS_IOS)
#include "base/message_loop/message_pump_libevent.h"
#endif
#if defined(OS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif
#if defined(USE_GLIB)
#include "base/message_loop/message_pump_glib.h"
#endif

namespace base {

namespace {

// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists. This should be safe and free of static constructors.
LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
    LAZY_INSTANCE_INITIALIZER;

// Logical events for Histogram profiling. Run with --message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
const int kTaskRunEvent = 0x1;
#if !defined(OS_NACL)
const int kTimerEvent = 0x2;

// Provide range of message IDs for use in histogramming and debug display.
const int kLeastNonZeroMessageId = 1;
const int kMaxMessageId = 1099;
const int kNumberOfDistinctMessagesDisplayed = 1100;

// Provide a macro that takes an expression (such as a constant, or macro
// constant) and creates a pair to initialize an array of pairs. In this case,
// our pair consists of the expression's value, and the "stringized" version
// of the expression (i.e., the expression put in quotes). For example, if
// we have:
//    #define FOO 2
//    #define BAR 5
// then the following:
//    VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
// will expand to:
//    {7, "FOO + BAR"}
// We use the resulting array as an argument to our histogram, which reads the
// number as a bucket identifier, and proceeds to use the corresponding name
// in the pair (i.e., the quoted string) when printing out a histogram.
#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},
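
// As a concrete example, the kTaskRunEvent entry used in event_descriptions_
// below expands to:
//   {0x1, "kTaskRunEvent"},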

const LinearHistogram::DescriptionPair event_descriptions_[] = {
  // Provide some pretty print capability in our histogram for our internal
  // messages.

  // A few events we handle (kindred to messages), and used to profile actions.
  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)

  {-1, NULL}  // The list must be null terminated, per API to histogram.
};
#endif  // !defined(OS_NACL)

bool enable_histogrammer_ = false;

MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;

#if defined(OS_IOS)
typedef MessagePumpIOSForIO MessagePumpForIO;
#elif defined(OS_NACL_SFI)
typedef MessagePumpDefault MessagePumpForIO;
#elif defined(OS_POSIX)
typedef MessagePumpLibevent MessagePumpForIO;
#endif

#if !defined(OS_NACL_SFI)
MessagePumpForIO* ToPumpIO(MessagePump* pump) {
  return static_cast<MessagePumpForIO*>(pump);
}
#endif  // !defined(OS_NACL_SFI)

}  // namespace

//------------------------------------------------------------------------------

MessageLoop::TaskObserver::TaskObserver() {
}

MessageLoop::TaskObserver::~TaskObserver() {
}

MessageLoop::DestructionObserver::~DestructionObserver() {
}

//------------------------------------------------------------------------------

MessageLoop::MessageLoop(Type type)
    : type_(type),
#if defined(OS_WIN)
      pending_high_res_tasks_(0),
      in_high_res_mode_(false),
#endif
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      message_histogram_(NULL),
      run_loop_(NULL) {
  Init();

  pump_ = CreateMessagePumpForType(type).Pass();
}

MessageLoop::MessageLoop(scoped_ptr<MessagePump> pump)
    : pump_(pump.Pass()),
      type_(TYPE_CUSTOM),
#if defined(OS_WIN)
      pending_high_res_tasks_(0),
      in_high_res_mode_(false),
#endif
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      message_histogram_(NULL),
      run_loop_(NULL) {
  DCHECK(pump_.get());
  Init();
}
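
// Illustrative usage sketch: constructing a loop around a custom pump yields a
// TYPE_CUSTOM loop.  MyPump and DoSomething are hypothetical.
//
//   scoped_ptr<MessagePump> pump(new MyPump);
//   MessageLoop loop(pump.Pass());  // loop.type() == MessageLoop::TYPE_CUSTOM
//   loop.PostTask(FROM_HERE, Bind(&DoSomething));
//   loop.Run();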

MessageLoop::~MessageLoop() {
  DCHECK_EQ(this, current());

  // iOS just attaches to the loop, it doesn't Run it.
  // TODO(stuartmorgan): Consider wiring up a Detach().
#if !defined(OS_IOS)
  DCHECK(!run_loop_);
#endif

#if defined(OS_WIN)
  if (in_high_res_mode_)
    Time::ActivateHighResolutionTimer(false);
#endif
  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon). We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks. Normally, we should only pass through this loop once or twice. If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn. Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }
  DCHECK(!did_work);

  // Let interested parties have one last shot at accessing this.
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    WillDestroyCurrentMessageLoop());

  thread_task_runner_handle_.reset();

  // Tell the incoming queue that we are dying.
  incoming_task_queue_->WillDestroyCurrentMessageLoop();
  incoming_task_queue_ = NULL;
  message_loop_proxy_ = NULL;

  // OK, now make it so that no one can find us.
  lazy_tls_ptr.Pointer()->Set(NULL);
}

// static
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

// static
void MessageLoop::EnableHistogrammer(bool enable) {
  enable_histogrammer_ = enable;
}

// static
bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
  if (message_pump_for_ui_factory_)
    return false;

  message_pump_for_ui_factory_ = factory;
  return true;
}
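
// Illustrative usage sketch: an embedder can install its own UI pump factory
// before any TYPE_UI loop is created.  CreateEmbedderPump is hypothetical and
// must match the MessagePumpFactory signature.
//
//   scoped_ptr<MessagePump> CreateEmbedderPump();
//   ...
//   MessageLoop::InitMessagePumpForUIFactory(&CreateEmbedderPump);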

// static
scoped_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
// TODO(rvargas): Get rid of the OS guards.
#if defined(USE_GLIB) && !defined(OS_NACL)
  typedef MessagePumpGlib MessagePumpForUI;
#elif defined(OS_LINUX) && !defined(OS_NACL)
  typedef MessagePumpLibevent MessagePumpForUI;
#endif

#if defined(OS_IOS) || defined(OS_MACOSX)
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(MessagePumpMac::Create())
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI MessageLoop.
// TODO(abarth): Figure out if we need this.
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>()
#else
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(new MessagePumpForUI())
#endif

#if defined(OS_MACOSX)
  // Use an OS native runloop on Mac to support timer coalescing.
  #define MESSAGE_PUMP_DEFAULT \
      scoped_ptr<MessagePump>(new MessagePumpCFRunLoop())
#else
  #define MESSAGE_PUMP_DEFAULT scoped_ptr<MessagePump>(new MessagePumpDefault())
#endif

  if (type == MessageLoop::TYPE_UI) {
    if (message_pump_for_ui_factory_)
      return message_pump_for_ui_factory_();
    return MESSAGE_PUMP_UI;
  }
  if (type == MessageLoop::TYPE_IO)
    return scoped_ptr<MessagePump>(new MessagePumpForIO());

#if defined(OS_ANDROID)
  if (type == MessageLoop::TYPE_JAVA)
    return scoped_ptr<MessagePump>(new MessagePumpForUI());
#endif

  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
  return MESSAGE_PUMP_DEFAULT;
}

void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}

void MessageLoop::PostTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  message_loop_proxy_->PostTask(from_here, task);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  message_loop_proxy_->PostDelayedTask(from_here, task, delay);
}

void MessageLoop::PostNonNestableTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  message_loop_proxy_->PostNonNestableTask(from_here, task);
}

void MessageLoop::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  message_loop_proxy_->PostNonNestableDelayedTask(from_here, task, delay);
}
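
// Illustrative usage sketch: posting work to the current thread's loop.
// DoWork and DoLater are hypothetical functions.
//
//   MessageLoop::current()->PostTask(FROM_HERE, Bind(&DoWork));
//   MessageLoop::current()->PostDelayedTask(
//       FROM_HERE, Bind(&DoLater), TimeDelta::FromSeconds(5));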

void MessageLoop::Run() {
  RunLoop run_loop;
  run_loop.Run();
}

void MessageLoop::RunUntilIdle() {
  RunLoop run_loop;
  run_loop.RunUntilIdle();
}

void MessageLoop::QuitWhenIdle() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    run_loop_->quit_when_idle_received_ = true;
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

void MessageLoop::QuitNow() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    pump_->Quit();
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

bool MessageLoop::IsType(Type type) const {
  return type_ == type;
}

static void QuitCurrentWhenIdle() {
  MessageLoop::current()->QuitWhenIdle();
}

// static
Closure MessageLoop::QuitWhenIdleClosure() {
  return Bind(&QuitCurrentWhenIdle);
}
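
// Illustrative usage sketch: QuitWhenIdleClosure() can be posted to a loop to
// stop it once its queues drain (the closure quits whichever loop runs it).
//
//   loop->PostTask(FROM_HERE, MessageLoop::QuitWhenIdleClosure());
//   ...
//   loop->Run();  // Returns once the loop becomes idle.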

void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (allowed) {
    // Kick the native pump just in case we enter an OS-driven nested message
    // loop.
    pump_->ScheduleWork();
  }
  nestable_tasks_allowed_ = allowed;
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

bool MessageLoop::IsNested() {
  return run_loop_->run_depth_ > 1;
}

void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.AddObserver(task_observer);
}

void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.RemoveObserver(task_observer);
}
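
// Illustrative sketch of a TaskObserver, which must be added and removed on
// the loop's own thread.  LoggingObserver is hypothetical.
//
//   class LoggingObserver : public MessageLoop::TaskObserver {
//    public:
//     void WillProcessTask(const PendingTask& pending_task) override {
//       VLOG(1) << "running " << pending_task.posted_from.ToString();
//     }
//     void DidProcessTask(const PendingTask& pending_task) override {}
//   };
//   ...
//   MessageLoop::current()->AddTaskObserver(&logging_observer);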

bool MessageLoop::is_running() const {
  DCHECK_EQ(this, current());
  return run_loop_ != NULL;
}

bool MessageLoop::HasHighResolutionTasks() {
  return incoming_task_queue_->HasHighResolutionTasks();
}

bool MessageLoop::IsIdleForTesting() {
  // We only check the incoming queue, since we don't want to lock the work
  // queue.
  return incoming_task_queue_->IsIdleForTesting();
}

//------------------------------------------------------------------------------

void MessageLoop::Init() {
  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);

  incoming_task_queue_ = new internal::IncomingTaskQueue(this);
  message_loop_proxy_ =
      new internal::MessageLoopProxyImpl(incoming_task_queue_);
  thread_task_runner_handle_.reset(
      new ThreadTaskRunnerHandle(message_loop_proxy_));
}

void MessageLoop::RunHandler() {
  DCHECK_EQ(this, current());

  StartHistogrammer();

#if defined(OS_WIN)
  if (run_loop_->dispatcher_ && type() == TYPE_UI) {
    static_cast<MessagePumpForUI*>(pump_.get())->
        RunWithDispatcher(this, run_loop_->dispatcher_);
    return;
  }
#endif

  pump_->Run(this);
}

bool MessageLoop::ProcessNextDelayedNonNestableTask() {
  if (run_loop_->run_depth_ != 1)
    return false;

  if (deferred_non_nestable_work_queue_.empty())
    return false;

  PendingTask pending_task = deferred_non_nestable_work_queue_.front();
  deferred_non_nestable_work_queue_.pop();

  RunTask(pending_task);
  return true;
}

void MessageLoop::RunTask(const PendingTask& pending_task) {
  DCHECK(nestable_tasks_allowed_);

#if defined(OS_WIN)
  if (pending_task.is_high_res) {
    pending_high_res_tasks_--;
    CHECK_GE(pending_high_res_tasks_, 0);
  }
#endif

  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  HistogramEvent(kTaskRunEvent);

  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    WillProcessTask(pending_task));
  task_annotator_.RunTask(
      "MessageLoop::PostTask", "MessageLoop::RunTask", pending_task);
  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    DidProcessTask(pending_task));

  nestable_tasks_allowed_ = true;
}

bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
  if (pending_task.nestable || run_loop_->run_depth_ == 1) {
    RunTask(pending_task);
    // Show that we ran a task (Note: a new one might arrive as a
    // consequence!).
    return true;
  }

  // We couldn't run the task now because we're in a nested message loop
  // and the task isn't nestable.
  deferred_non_nestable_work_queue_.push(pending_task);
  return false;
}

void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
  // Move to the delayed work queue.
  delayed_work_queue_.push(pending_task);
}

bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = work_queue_.front();
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(pending_task);
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    deferred_non_nestable_work_queue_.pop();
  }
  did_work |= !delayed_work_queue_.empty();

  // Historically, we always delete the task regardless of valgrind status. It's
  // not completely clear why we want to leak them in the loops above. This
  // code is replicating legacy behavior, and should not be considered
  // absolutely "correct" behavior. See TODO above about deleting all tasks
  // when it's safe.
  while (!delayed_work_queue_.empty()) {
    delayed_work_queue_.pop();
  }
  return did_work;
}

void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from the incoming queue to
  // |*work_queue| by waiting until the last minute (|*work_queue| is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
  if (work_queue_.empty()) {
#if defined(OS_WIN)
    pending_high_res_tasks_ +=
        incoming_task_queue_->ReloadWorkQueue(&work_queue_);
#else
    incoming_task_queue_->ReloadWorkQueue(&work_queue_);
#endif
  }
}

void MessageLoop::ScheduleWork() {
  pump_->ScheduleWork();
}

//------------------------------------------------------------------------------
// Method and data for histogramming events and actions taken by each instance
// on each thread.

void MessageLoop::StartHistogrammer() {
#if !defined(OS_NACL)  // NaCl build has no metrics code.
  if (enable_histogrammer_ && !message_histogram_
      && StatisticsRecorder::IsActive()) {
    DCHECK(!thread_name_.empty());
    message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
        "MsgLoop:" + thread_name_,
        kLeastNonZeroMessageId, kMaxMessageId,
        kNumberOfDistinctMessagesDisplayed,
        message_histogram_->kHexRangePrintingFlag,
        event_descriptions_);
  }
#endif
}

void MessageLoop::HistogramEvent(int event) {
#if !defined(OS_NACL)
  if (message_histogram_)
    message_histogram_->Add(event);
#endif
}

bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = work_queue_.front();
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        AddToDelayedWorkQueue(pending_task);
        // If we changed the topmost task, then it is time to reschedule.
        if (delayed_work_queue_.top().task.Equals(pending_task.task))
          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(pending_task))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}

bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind," there will be a lot of tasks in the delayed work
  // queue that are ready to run. To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again. As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task = delayed_work_queue_.top();
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(pending_task);
}

bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  if (run_loop_->quit_when_idle_received_)
    pump_->Quit();

  // When we return we will do a kernel wait for more tasks.
#if defined(OS_WIN)
  // On Windows we activate the high resolution timer so that the wait
  // _if_ triggered by the timer happens with good resolution. If we don't
  // do this the default resolution is 15ms which might not be acceptable
  // for some tasks.
  bool high_res = pending_high_res_tasks_ > 0;
  if (high_res != in_high_res_mode_) {
    in_high_res_mode_ = high_res;
    Time::ActivateHighResolutionTimer(in_high_res_mode_);
  }
#endif
  return false;
}
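
// For orientation, a pump drives this delegate interface roughly as follows
// (a simplified sketch of a MessagePumpDefault-style run loop, not a verbatim
// copy):
//
//   for (;;) {
//     bool did_work = delegate->DoWork();
//     did_work |= delegate->DoDelayedWork(&delayed_work_time);
//     if (did_work)
//       continue;
//     if (delegate->DoIdleWork())
//       continue;
//     WaitForWork();  // Sleeps until ScheduleWork()/ScheduleDelayedWork().
//   }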

void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                     void(*deleter)(const void*),
                                     const void* object) {
  PostNonNestableTask(from_here, Bind(deleter, object));
}

void MessageLoop::ReleaseSoonInternal(
    const tracked_objects::Location& from_here,
    void(*releaser)(const void*),
    const void* object) {
  PostNonNestableTask(from_here, Bind(releaser, object));
}
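
// Illustrative usage sketch: callers normally reach these through the
// DeleteSoon()/ReleaseSoon() templates declared in the header, e.g.
//
//   MessageLoop::current()->DeleteSoon(FROM_HERE, owned_object);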

#if !defined(OS_NACL)

//------------------------------------------------------------------------------
// MessageLoopForUI

#if defined(OS_ANDROID)
void MessageLoopForUI::Start() {
  // No Histogram support for the UI message loop as it is managed by the Java
  // side.
  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
#endif

#if defined(OS_IOS)
void MessageLoopForUI::Attach() {
  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif

#if defined(USE_OZONE) || (defined(USE_X11) && !defined(USE_GLIB))
bool MessageLoopForUI::WatchFileDescriptor(
    int fd,
    bool persistent,
    MessagePumpLibevent::Mode mode,
    MessagePumpLibevent::FileDescriptorWatcher* controller,
    MessagePumpLibevent::Watcher* delegate) {
  return static_cast<MessagePumpLibevent*>(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif

#endif  // !defined(OS_NACL)

//------------------------------------------------------------------------------
// MessageLoopForIO

#if !defined(OS_NACL_SFI)
void MessageLoopForIO::AddIOObserver(
    MessageLoopForIO::IOObserver* io_observer) {
  ToPumpIO(pump_.get())->AddIOObserver(io_observer);
}

void MessageLoopForIO::RemoveIOObserver(
    MessageLoopForIO::IOObserver* io_observer) {
  ToPumpIO(pump_.get())->RemoveIOObserver(io_observer);
}

#if defined(OS_WIN)
void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
  return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
}
#elif defined(OS_POSIX)
bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher* controller,
                                           Watcher* delegate) {
  return ToPumpIO(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif
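
// Illustrative usage sketch: watching a socket for readability from a TYPE_IO
// thread.  |sock_fd| and |my_watcher| (a MessageLoopForIO::Watcher) are
// hypothetical.
//
//   MessageLoopForIO::FileDescriptorWatcher controller;
//   MessageLoopForIO::current()->WatchFileDescriptor(
//       sock_fd, true /* persistent */, MessageLoopForIO::WATCH_READ,
//       &controller, &my_watcher);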

#endif  // !defined(OS_NACL_SFI)

}  // namespace base