[chromium-blink-merge.git] / base/message_loop/message_loop.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop/message_loop.h"

#include <algorithm>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_pump_default.h"
#include "base/metrics/histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_local.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "base/tracked_objects.h"

#if defined(OS_MACOSX)
#include "base/message_loop/message_pump_mac.h"
#endif
#if defined(OS_POSIX) && !defined(OS_IOS)
#include "base/message_loop/message_pump_libevent.h"
#endif
#if defined(OS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif
#if defined(USE_GLIB)
#include "base/message_loop/message_pump_glib.h"
#endif

namespace base {

namespace {

// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists. This should be safe and free of static constructors.
LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
    LAZY_INSTANCE_INITIALIZER;

// Logical events for Histogram profiling. Run with --message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
const int kTaskRunEvent = 0x1;
#if !defined(OS_NACL)
const int kTimerEvent = 0x2;

// Provide range of message IDs for use in histogramming and debug display.
const int kLeastNonZeroMessageId = 1;
const int kMaxMessageId = 1099;
const int kNumberOfDistinctMessagesDisplayed = 1100;

// Provide a macro that takes an expression (such as a constant, or macro
// constant) and creates a pair to initialize an array of pairs. In this case,
// our pair consists of the expression's value, and the "stringized" version
// of the expression (i.e., the expression put in quotes). For example, if
// we have:
//    #define FOO 2
//    #define BAR 5
// then the following:
//    VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
// will expand to:
//    {7, "FOO + BAR"}
// We use the resulting array as an argument to our histogram, which reads the
// number as a bucket identifier, and proceeds to use the corresponding name
// in the pair (i.e., the quoted string) when printing out a histogram.
#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},
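
// For example, VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent) expands to
// {kTaskRunEvent, "kTaskRunEvent"}, i.e. {0x1, "kTaskRunEvent"}, and supplies
// one entry of event_descriptions_ below.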

const LinearHistogram::DescriptionPair event_descriptions_[] = {
  // Provide some pretty print capability in our histogram for our internal
  // messages.

  // A few events we handle (kindred to messages), and used to profile actions.
  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)

  {-1, NULL}  // The list must be null-terminated, per API to histogram.
};
#endif  // !defined(OS_NACL)

bool enable_histogrammer_ = false;

MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;

#if defined(OS_IOS)
typedef MessagePumpIOSForIO MessagePumpForIO;
#elif defined(OS_NACL_SFI)
typedef MessagePumpDefault MessagePumpForIO;
#elif defined(OS_POSIX)
typedef MessagePumpLibevent MessagePumpForIO;
#endif

#if !defined(OS_NACL_SFI)
MessagePumpForIO* ToPumpIO(MessagePump* pump) {
  return static_cast<MessagePumpForIO*>(pump);
}
#endif  // !defined(OS_NACL_SFI)

scoped_ptr<MessagePump> ReturnPump(scoped_ptr<MessagePump> pump) {
  return pump;
}

}  // namespace

//------------------------------------------------------------------------------

MessageLoop::TaskObserver::TaskObserver() {
}

MessageLoop::TaskObserver::~TaskObserver() {
}

MessageLoop::DestructionObserver::~DestructionObserver() {
}

//------------------------------------------------------------------------------

MessageLoop::MessageLoop(Type type)
    : MessageLoop(type, MessagePumpFactoryCallback()) {
  BindToCurrentThread();
}

MessageLoop::MessageLoop(scoped_ptr<MessagePump> pump)
    : MessageLoop(TYPE_CUSTOM, Bind(&ReturnPump, Passed(&pump))) {
  BindToCurrentThread();
}
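
// Example usage (sketch): a thread that owns a MessageLoop constructs it on
// that thread, posts work to it, and runs it. SomeFunction is a placeholder
// for caller-provided work.
//
//   MessageLoop loop(MessageLoop::TYPE_IO);
//   loop.PostTask(FROM_HERE, Bind(&SomeFunction));
//   loop.RunUntilIdle();
//
// The TYPE_CUSTOM path instead supplies its own pump through the
// scoped_ptr<MessagePump> constructor above.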

MessageLoop::~MessageLoop() {
  // current() could be NULL if this message loop is destructed before it is
  // bound to a thread.
  DCHECK(current() == this || !current());

  // iOS just attaches to the loop, it doesn't Run it.
  // TODO(stuartmorgan): Consider wiring up a Detach().
#if !defined(OS_IOS)
  DCHECK(!run_loop_);
#endif

#if defined(OS_WIN)
  if (in_high_res_mode_)
    Time::ActivateHighResolutionTimer(false);
#endif
  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon). We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks. Normally, we should only pass through this loop once or twice. If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn. Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }
  DCHECK(!did_work);

  // Let interested parties have one last shot at accessing this.
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    WillDestroyCurrentMessageLoop());

  thread_task_runner_handle_.reset();

  // Tell the incoming queue that we are dying.
  incoming_task_queue_->WillDestroyCurrentMessageLoop();
  incoming_task_queue_ = NULL;
  unbound_task_runner_ = NULL;
  task_runner_ = NULL;

  // OK, now make it so that no one can find us.
  lazy_tls_ptr.Pointer()->Set(NULL);
}

// static
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

// static
void MessageLoop::EnableHistogrammer(bool enable) {
  enable_histogrammer_ = enable;
}

// static
bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
  if (message_pump_for_ui_factory_)
    return false;

  message_pump_for_ui_factory_ = factory;
  return true;
}
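
// Example (sketch): an embedder may install a UI pump factory once, before any
// TYPE_UI loop is created. CreateMyUIPump is a placeholder whose exact
// signature is given by MessagePumpFactory in message_loop.h.
//
//   bool installed = MessageLoop::InitMessagePumpForUIFactory(&CreateMyUIPump);
//   DCHECK(installed);  // False means a factory was already registered.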

// static
scoped_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
// TODO(rvargas): Get rid of the OS guards.
#if defined(USE_GLIB) && !defined(OS_NACL)
  typedef MessagePumpGlib MessagePumpForUI;
#elif defined(OS_LINUX) && !defined(OS_NACL)
  typedef MessagePumpLibevent MessagePumpForUI;
#endif

#if defined(OS_IOS) || defined(OS_MACOSX)
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(MessagePumpMac::Create())
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI MessageLoop.
// TODO(abarth): Figure out if we need this.
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>()
#else
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(new MessagePumpForUI())
#endif

#if defined(OS_MACOSX)
  // Use an OS native runloop on Mac to support timer coalescing.
#define MESSAGE_PUMP_DEFAULT \
    scoped_ptr<MessagePump>(new MessagePumpCFRunLoop())
#else
#define MESSAGE_PUMP_DEFAULT scoped_ptr<MessagePump>(new MessagePumpDefault())
#endif

  if (type == MessageLoop::TYPE_UI) {
    if (message_pump_for_ui_factory_)
      return message_pump_for_ui_factory_();
    return MESSAGE_PUMP_UI;
  }
  if (type == MessageLoop::TYPE_IO)
    return scoped_ptr<MessagePump>(new MessagePumpForIO());

#if defined(OS_ANDROID)
  if (type == MessageLoop::TYPE_JAVA)
    return scoped_ptr<MessagePump>(new MessagePumpForUI());
#endif

  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
  return MESSAGE_PUMP_DEFAULT;
}

void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}

void MessageLoop::PostTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  task_runner_->PostTask(from_here, task);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  task_runner_->PostDelayedTask(from_here, task, delay);
}

void MessageLoop::PostNonNestableTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  task_runner_->PostNonNestableTask(from_here, task);
}

void MessageLoop::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  task_runner_->PostNonNestableDelayedTask(from_here, task, delay);
}
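
// Example (sketch): the Post*Task variants above all forward to the loop's
// SingleThreadTaskRunner. DoSomething is a placeholder for caller-provided
// work.
//
//   MessageLoop::current()->PostTask(FROM_HERE, Bind(&DoSomething));
//   MessageLoop::current()->PostDelayedTask(
//       FROM_HERE, Bind(&DoSomething), TimeDelta::FromMilliseconds(100));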

void MessageLoop::Run() {
  DCHECK(pump_);
  RunLoop run_loop;
  run_loop.Run();
}

void MessageLoop::RunUntilIdle() {
  DCHECK(pump_);
  RunLoop run_loop;
  run_loop.RunUntilIdle();
}

void MessageLoop::QuitWhenIdle() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    run_loop_->quit_when_idle_received_ = true;
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

void MessageLoop::QuitNow() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    pump_->Quit();
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

bool MessageLoop::IsType(Type type) const {
  return type_ == type;
}

static void QuitCurrentWhenIdle() {
  MessageLoop::current()->QuitWhenIdle();
}

// static
Closure MessageLoop::QuitWhenIdleClosure() {
  return Bind(&QuitCurrentWhenIdle);
}
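
// Example (sketch): QuitWhenIdleClosure() lets code that only has a Closure
// hook ask the loop to exit once it runs out of work.
//
//   MessageLoop::current()->PostTask(FROM_HERE,
//                                    MessageLoop::QuitWhenIdleClosure());
//   MessageLoop::current()->Run();  // Returns once the queues go idle.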

void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (allowed) {
    // Kick the native pump just in case we enter an OS-driven nested message
    // loop.
    pump_->ScheduleWork();
  }
  nestable_tasks_allowed_ = allowed;
}
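
// Example (sketch): code that is about to spin a nested loop can temporarily
// allow application tasks to run inside it and then restore the old setting.
//
//   MessageLoop* loop = MessageLoop::current();
//   bool old_state = loop->NestableTasksAllowed();
//   loop->SetNestableTasksAllowed(true);
//   // ... run the nested loop ...
//   loop->SetNestableTasksAllowed(old_state);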

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

bool MessageLoop::IsNested() {
  return run_loop_->run_depth_ > 1;
}

void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.AddObserver(task_observer);
}

void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.RemoveObserver(task_observer);
}

bool MessageLoop::is_running() const {
  DCHECK_EQ(this, current());
  return run_loop_ != NULL;
}

bool MessageLoop::HasHighResolutionTasks() {
  return incoming_task_queue_->HasHighResolutionTasks();
}

bool MessageLoop::IsIdleForTesting() {
  // We only check the incoming queue, since we don't want to lock the work
  // queue.
  return incoming_task_queue_->IsIdleForTesting();
}

//------------------------------------------------------------------------------

// static
scoped_ptr<MessageLoop> MessageLoop::CreateUnbound(
    Type type, MessagePumpFactoryCallback pump_factory) {
  return make_scoped_ptr(new MessageLoop(type, pump_factory));
}
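
// Example (sketch): CreateUnbound() builds a loop that is not yet attached to
// any thread; the thread that will run it later binds it via
// BindToCurrentThread() (how that step is exposed is defined in
// message_loop.h). MyPumpFactory is a placeholder pump-factory callback.
//
//   scoped_ptr<MessageLoop> loop = MessageLoop::CreateUnbound(
//       MessageLoop::TYPE_CUSTOM, Bind(&MyPumpFactory));
//   // ... later, on the target thread:
//   loop->BindToCurrentThread();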

MessageLoop::MessageLoop(Type type, MessagePumpFactoryCallback pump_factory)
    : type_(type),
#if defined(OS_WIN)
      pending_high_res_tasks_(0),
      in_high_res_mode_(false),
#endif
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      pump_factory_(pump_factory),
      message_histogram_(NULL),
      run_loop_(NULL),
      incoming_task_queue_(new internal::IncomingTaskQueue(this)),
      unbound_task_runner_(
          new internal::MessageLoopTaskRunner(incoming_task_queue_)),
      task_runner_(unbound_task_runner_) {
  // If type is TYPE_CUSTOM, a non-null pump_factory must be given.
  DCHECK_EQ(type_ == TYPE_CUSTOM, !pump_factory_.is_null());
}

void MessageLoop::BindToCurrentThread() {
  DCHECK(!pump_);
  if (!pump_factory_.is_null())
    pump_ = pump_factory_.Run();
  else
    pump_ = CreateMessagePumpForType(type_);

  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);

  incoming_task_queue_->StartScheduling();
  unbound_task_runner_->BindToCurrentThread();
  unbound_task_runner_ = nullptr;
  SetThreadTaskRunnerHandle();
}

void MessageLoop::SetTaskRunner(
    scoped_refptr<SingleThreadTaskRunner> task_runner) {
  DCHECK_EQ(this, current());
  DCHECK(task_runner->BelongsToCurrentThread());
  DCHECK(!unbound_task_runner_);
  task_runner_ = task_runner.Pass();
  SetThreadTaskRunnerHandle();
}

void MessageLoop::SetThreadTaskRunnerHandle() {
  DCHECK_EQ(this, current());
  // Clear the previous thread task runner first, because only one can exist at
  // a time.
  thread_task_runner_handle_.reset();
  thread_task_runner_handle_.reset(new ThreadTaskRunnerHandle(task_runner_));
}

void MessageLoop::RunHandler() {
  DCHECK_EQ(this, current());

  StartHistogrammer();

#if defined(OS_WIN)
  if (run_loop_->dispatcher_ && type() == TYPE_UI) {
    static_cast<MessagePumpForUI*>(pump_.get())->
        RunWithDispatcher(this, run_loop_->dispatcher_);
    return;
  }
#endif

  pump_->Run(this);
}

bool MessageLoop::ProcessNextDelayedNonNestableTask() {
  if (run_loop_->run_depth_ != 1)
    return false;

  if (deferred_non_nestable_work_queue_.empty())
    return false;

  PendingTask pending_task = deferred_non_nestable_work_queue_.front();
  deferred_non_nestable_work_queue_.pop();

  RunTask(pending_task);
  return true;
}

void MessageLoop::RunTask(const PendingTask& pending_task) {
  DCHECK(nestable_tasks_allowed_);

#if defined(OS_WIN)
  if (pending_task.is_high_res) {
    pending_high_res_tasks_--;
    CHECK_GE(pending_high_res_tasks_, 0);
  }
#endif

  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  HistogramEvent(kTaskRunEvent);

  TRACE_TASK_EXECUTION("MessageLoop::RunTask", pending_task);

  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    WillProcessTask(pending_task));
  task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    DidProcessTask(pending_task));

  nestable_tasks_allowed_ = true;
}

bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
  if (pending_task.nestable || run_loop_->run_depth_ == 1) {
    RunTask(pending_task);
    // Show that we ran a task (Note: a new one might arrive as a
    // consequence!).
    return true;
  }

  // We couldn't run the task now because we're in a nested message loop
  // and the task isn't nestable.
  deferred_non_nestable_work_queue_.push(pending_task);
  return false;
}

void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
  // Move to the delayed work queue.
  delayed_work_queue_.push(pending_task);
}

bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = work_queue_.front();
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(pending_task);
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    deferred_non_nestable_work_queue_.pop();
  }
  did_work |= !delayed_work_queue_.empty();

  // Historically, we always delete the task regardless of valgrind status. It's
  // not completely clear why we want to leak them in the loops above. This
  // code is replicating legacy behavior, and should not be considered
  // absolutely "correct" behavior. See TODO above about deleting all tasks
  // when it's safe.
  while (!delayed_work_queue_.empty()) {
    delayed_work_queue_.pop();
  }
  return did_work;
}

void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from the incoming queue to
  // |*work_queue| by waiting until the last minute (|*work_queue| is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
  if (work_queue_.empty()) {
#if defined(OS_WIN)
    pending_high_res_tasks_ +=
        incoming_task_queue_->ReloadWorkQueue(&work_queue_);
#else
    incoming_task_queue_->ReloadWorkQueue(&work_queue_);
#endif
  }
}

void MessageLoop::ScheduleWork() {
  pump_->ScheduleWork();
}

//------------------------------------------------------------------------------
// Method and data for histogramming events and actions taken by each instance
// on each thread.

void MessageLoop::StartHistogrammer() {
#if !defined(OS_NACL)  // NaCl build has no metrics code.
  if (enable_histogrammer_ && !message_histogram_
      && StatisticsRecorder::IsActive()) {
    DCHECK(!thread_name_.empty());
    message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
        "MsgLoop:" + thread_name_,
        kLeastNonZeroMessageId, kMaxMessageId,
        kNumberOfDistinctMessagesDisplayed,
        HistogramBase::kHexRangePrintingFlag,
        event_descriptions_);
  }
#endif
}

void MessageLoop::HistogramEvent(int event) {
#if !defined(OS_NACL)
  if (message_histogram_)
    message_histogram_->Add(event);
#endif
}
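
// Example (sketch): histogramming is off by default. Per the comment on the
// event constants at the top of this file it is driven by
// --message-loop-histogrammer, and the process-side opt-in is:
//
//   MessageLoop::EnableHistogrammer(true);  // Before the loops of interest run.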

bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = work_queue_.front();
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        AddToDelayedWorkQueue(pending_task);
        // If we changed the topmost task, then it is time to reschedule.
        if (delayed_work_queue_.top().task.Equals(pending_task.task))
          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(pending_task))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}

bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind", there will be a lot of tasks in the delayed work
  // queue that are ready to run. To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again. As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task = delayed_work_queue_.top();
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(pending_task);
}

bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  if (run_loop_->quit_when_idle_received_)
    pump_->Quit();

  // When we return we will do a kernel wait for more tasks.
#if defined(OS_WIN)
  // On Windows we activate the high resolution timer so that the wait
  // _if_ triggered by the timer happens with good resolution. If we don't
  // do this the default resolution is 15ms which might not be acceptable
  // for some tasks.
  bool high_res = pending_high_res_tasks_ > 0;
  if (high_res != in_high_res_mode_) {
    in_high_res_mode_ = high_res;
    Time::ActivateHighResolutionTimer(in_high_res_mode_);
  }
#endif
  return false;
}

void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                     void(*deleter)(const void*),
                                     const void* object) {
  PostNonNestableTask(from_here, Bind(deleter, object));
}

void MessageLoop::ReleaseSoonInternal(
    const tracked_objects::Location& from_here,
    void(*releaser)(const void*),
    const void* object) {
  PostNonNestableTask(from_here, Bind(releaser, object));
}
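
// Example (sketch): DeleteSoonInternal()/ReleaseSoonInternal() are the
// type-erased backends for the DeleteSoon()/ReleaseSoon() helpers declared in
// message_loop.h, e.g. deleting an object on the loop that owns it
// (doomed_object is a placeholder):
//
//   owning_loop->DeleteSoon(FROM_HERE, doomed_object);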

#if !defined(OS_NACL)
//------------------------------------------------------------------------------
// MessageLoopForUI

#if defined(OS_ANDROID)
void MessageLoopForUI::Start() {
  // No Histogram support for UI message loop as it is managed by Java side
  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
#endif

#if defined(OS_IOS)
void MessageLoopForUI::Attach() {
  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif

#if defined(USE_OZONE) || (defined(USE_X11) && !defined(USE_GLIB))
bool MessageLoopForUI::WatchFileDescriptor(
    int fd,
    bool persistent,
    MessagePumpLibevent::Mode mode,
    MessagePumpLibevent::FileDescriptorWatcher* controller,
    MessagePumpLibevent::Watcher* delegate) {
  return static_cast<MessagePumpLibevent*>(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif

#endif  // !defined(OS_NACL)

//------------------------------------------------------------------------------
// MessageLoopForIO

#if !defined(OS_NACL_SFI)
void MessageLoopForIO::AddIOObserver(
    MessageLoopForIO::IOObserver* io_observer) {
  ToPumpIO(pump_.get())->AddIOObserver(io_observer);
}

void MessageLoopForIO::RemoveIOObserver(
    MessageLoopForIO::IOObserver* io_observer) {
  ToPumpIO(pump_.get())->RemoveIOObserver(io_observer);
}

#if defined(OS_WIN)
void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
  return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
}
#elif defined(OS_POSIX)
bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher* controller,
                                           Watcher* delegate) {
  return ToPumpIO(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
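
// Example (sketch): a Watcher implementation receives read/write readiness
// callbacks for the watched descriptor. MySocketWatcher and sock_fd are
// placeholders.
//
//   MessageLoopForIO::FileDescriptorWatcher controller;
//   MySocketWatcher watcher;  // Implements MessageLoopForIO::Watcher.
//   MessageLoopForIO::current()->WatchFileDescriptor(
//       sock_fd, true /* persistent */, MessageLoopForIO::WATCH_READ,
//       &controller, &watcher);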
#endif

#endif  // !defined(OS_NACL_SFI)

}  // namespace base