Added unit test for DevTools' ephemeral port support.
[chromium-blink-merge.git] / base / message_loop / message_loop.cc
blobdd1a393ab08633514df9d6d021a1be7ff4dbd23f
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/message_loop/message_loop.h"
7 #include <algorithm>
9 #include "base/bind.h"
10 #include "base/compiler_specific.h"
11 #include "base/debug/alias.h"
12 #include "base/debug/trace_event.h"
13 #include "base/lazy_instance.h"
14 #include "base/logging.h"
15 #include "base/memory/scoped_ptr.h"
16 #include "base/message_loop/message_pump_default.h"
17 #include "base/metrics/histogram.h"
18 #include "base/metrics/statistics_recorder.h"
19 #include "base/run_loop.h"
20 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
21 #include "base/thread_task_runner_handle.h"
22 #include "base/threading/thread_local.h"
23 #include "base/time/time.h"
24 #include "base/tracked_objects.h"
26 #if defined(OS_MACOSX)
27 #include "base/message_loop/message_pump_mac.h"
28 #endif
29 #if defined(OS_POSIX) && !defined(OS_IOS)
30 #include "base/message_loop/message_pump_libevent.h"
31 #endif
32 #if defined(OS_ANDROID)
33 #include "base/message_loop/message_pump_android.h"
34 #endif
35 #if defined(USE_GLIB)
36 #include "base/message_loop/message_pump_glib.h"
37 #endif
39 namespace base {
41 namespace {
43 // A lazily created thread local storage for quick access to a thread's message
44 // loop, if one exists. This should be safe and free of static constructors.
45 LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
46 LAZY_INSTANCE_INITIALIZER;
48 // Logical events for Histogram profiling. Run with -message-loop-histogrammer
49 // to get an accounting of messages and actions taken on each thread.
50 const int kTaskRunEvent = 0x1;
51 #if !defined(OS_NACL)
52 const int kTimerEvent = 0x2;
54 // Provide range of message IDs for use in histogramming and debug display.
55 const int kLeastNonZeroMessageId = 1;
56 const int kMaxMessageId = 1099;
57 const int kNumberOfDistinctMessagesDisplayed = 1100;
59 // Provide a macro that takes an expression (such as a constant, or macro
60 // constant) and creates a pair to initalize an array of pairs. In this case,
61 // our pair consists of the expressions value, and the "stringized" version
62 // of the expression (i.e., the exrpression put in quotes). For example, if
63 // we have:
64 // #define FOO 2
65 // #define BAR 5
66 // then the following:
67 // VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
68 // will expand to:
69 // {7, "FOO + BAR"}
70 // We use the resulting array as an argument to our histogram, which reads the
71 // number as a bucket identifier, and proceeds to use the corresponding name
72 // in the pair (i.e., the quoted string) when printing out a histogram.
73 #define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},
75 const LinearHistogram::DescriptionPair event_descriptions_[] = {
76 // Provide some pretty print capability in our histogram for our internal
77 // messages.
79 // A few events we handle (kindred to messages), and used to profile actions.
80 VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
81 VALUE_TO_NUMBER_AND_NAME(kTimerEvent)
83 {-1, NULL} // The list must be null terminated, per API to histogram.
85 #endif // !defined(OS_NACL)
87 bool enable_histogrammer_ = false;
89 MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;
91 // Returns true if MessagePump::ScheduleWork() must be called one
92 // time for every task that is added to the MessageLoop incoming queue.
93 bool AlwaysNotifyPump(MessageLoop::Type type) {
94 #if defined(OS_ANDROID)
95 return type == MessageLoop::TYPE_UI || type == MessageLoop::TYPE_JAVA;
96 #else
97 return false;
98 #endif
101 #if defined(OS_IOS)
102 typedef MessagePumpIOSForIO MessagePumpForIO;
103 #elif defined(OS_NACL)
104 typedef MessagePumpDefault MessagePumpForIO;
105 #elif defined(OS_POSIX)
106 typedef MessagePumpLibevent MessagePumpForIO;
107 #endif
109 MessagePumpForIO* ToPumpIO(MessagePump* pump) {
110 return static_cast<MessagePumpForIO*>(pump);
113 } // namespace
115 //------------------------------------------------------------------------------
117 MessageLoop::TaskObserver::TaskObserver() {
120 MessageLoop::TaskObserver::~TaskObserver() {
123 MessageLoop::DestructionObserver::~DestructionObserver() {
126 //------------------------------------------------------------------------------
128 MessageLoop::MessageLoop(Type type)
129 : type_(type),
130 nestable_tasks_allowed_(true),
131 #if defined(OS_WIN)
132 os_modal_loop_(false),
133 #endif // OS_WIN
134 message_histogram_(NULL),
135 run_loop_(NULL) {
136 Init();
138 pump_ = CreateMessagePumpForType(type).Pass();
141 MessageLoop::MessageLoop(scoped_ptr<MessagePump> pump)
142 : pump_(pump.Pass()),
143 type_(TYPE_CUSTOM),
144 nestable_tasks_allowed_(true),
145 #if defined(OS_WIN)
146 os_modal_loop_(false),
147 #endif // OS_WIN
148 message_histogram_(NULL),
149 run_loop_(NULL) {
150 DCHECK(pump_.get());
151 Init();
154 MessageLoop::~MessageLoop() {
155 DCHECK_EQ(this, current());
157 DCHECK(!run_loop_);
159 // Clean up any unprocessed tasks, but take care: deleting a task could
160 // result in the addition of more tasks (e.g., via DeleteSoon). We set a
161 // limit on the number of times we will allow a deleted task to generate more
162 // tasks. Normally, we should only pass through this loop once or twice. If
163 // we end up hitting the loop limit, then it is probably due to one task that
164 // is being stubborn. Inspect the queues to see who is left.
165 bool did_work;
166 for (int i = 0; i < 100; ++i) {
167 DeletePendingTasks();
168 ReloadWorkQueue();
169 // If we end up with empty queues, then break out of the loop.
170 did_work = DeletePendingTasks();
171 if (!did_work)
172 break;
174 DCHECK(!did_work);
176 // Let interested parties have one last shot at accessing this.
177 FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
178 WillDestroyCurrentMessageLoop());
180 thread_task_runner_handle_.reset();
182 // Tell the incoming queue that we are dying.
183 incoming_task_queue_->WillDestroyCurrentMessageLoop();
184 incoming_task_queue_ = NULL;
185 message_loop_proxy_ = NULL;
187 // OK, now make it so that no one can find us.
188 lazy_tls_ptr.Pointer()->Set(NULL);
191 // static
192 MessageLoop* MessageLoop::current() {
193 // TODO(darin): sadly, we cannot enable this yet since people call us even
194 // when they have no intention of using us.
195 // DCHECK(loop) << "Ouch, did you forget to initialize me?";
196 return lazy_tls_ptr.Pointer()->Get();
199 // static
200 void MessageLoop::EnableHistogrammer(bool enable) {
201 enable_histogrammer_ = enable;
204 // static
205 bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
206 if (message_pump_for_ui_factory_)
207 return false;
209 message_pump_for_ui_factory_ = factory;
210 return true;
213 // static
214 scoped_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
215 // TODO(rvargas): Get rid of the OS guards.
216 #if defined(USE_GLIB) && !defined(OS_NACL)
217 typedef MessagePumpGlib MessagePumpForUI;
218 #elif defined(OS_LINUX) && !defined(OS_NACL)
219 typedef MessagePumpLibevent MessagePumpForUI;
220 #endif
222 #if defined(OS_IOS) || defined(OS_MACOSX)
223 #define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(MessagePumpMac::Create())
224 #elif defined(OS_NACL)
225 // Currently NaCl doesn't have a UI MessageLoop.
226 // TODO(abarth): Figure out if we need this.
227 #define MESSAGE_PUMP_UI scoped_ptr<MessagePump>()
228 #else
229 #define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(new MessagePumpForUI())
230 #endif
232 if (type == MessageLoop::TYPE_UI) {
233 if (message_pump_for_ui_factory_)
234 return message_pump_for_ui_factory_();
235 return MESSAGE_PUMP_UI;
237 if (type == MessageLoop::TYPE_IO)
238 return scoped_ptr<MessagePump>(new MessagePumpForIO());
240 #if defined(OS_ANDROID)
241 if (type == MessageLoop::TYPE_JAVA)
242 return scoped_ptr<MessagePump>(new MessagePumpForUI());
243 #endif
245 DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
246 return scoped_ptr<MessagePump>(new MessagePumpDefault());
249 void MessageLoop::AddDestructionObserver(
250 DestructionObserver* destruction_observer) {
251 DCHECK_EQ(this, current());
252 destruction_observers_.AddObserver(destruction_observer);
255 void MessageLoop::RemoveDestructionObserver(
256 DestructionObserver* destruction_observer) {
257 DCHECK_EQ(this, current());
258 destruction_observers_.RemoveObserver(destruction_observer);
261 void MessageLoop::PostTask(
262 const tracked_objects::Location& from_here,
263 const Closure& task) {
264 DCHECK(!task.is_null()) << from_here.ToString();
265 incoming_task_queue_->AddToIncomingQueue(from_here, task, TimeDelta(), true);
268 void MessageLoop::PostDelayedTask(
269 const tracked_objects::Location& from_here,
270 const Closure& task,
271 TimeDelta delay) {
272 DCHECK(!task.is_null()) << from_here.ToString();
273 incoming_task_queue_->AddToIncomingQueue(from_here, task, delay, true);
276 void MessageLoop::PostNonNestableTask(
277 const tracked_objects::Location& from_here,
278 const Closure& task) {
279 DCHECK(!task.is_null()) << from_here.ToString();
280 incoming_task_queue_->AddToIncomingQueue(from_here, task, TimeDelta(), false);
283 void MessageLoop::PostNonNestableDelayedTask(
284 const tracked_objects::Location& from_here,
285 const Closure& task,
286 TimeDelta delay) {
287 DCHECK(!task.is_null()) << from_here.ToString();
288 incoming_task_queue_->AddToIncomingQueue(from_here, task, delay, false);
291 void MessageLoop::Run() {
292 RunLoop run_loop;
293 run_loop.Run();
296 void MessageLoop::RunUntilIdle() {
297 RunLoop run_loop;
298 run_loop.RunUntilIdle();
301 void MessageLoop::QuitWhenIdle() {
302 DCHECK_EQ(this, current());
303 if (run_loop_) {
304 run_loop_->quit_when_idle_received_ = true;
305 } else {
306 NOTREACHED() << "Must be inside Run to call Quit";
310 void MessageLoop::QuitNow() {
311 DCHECK_EQ(this, current());
312 if (run_loop_) {
313 pump_->Quit();
314 } else {
315 NOTREACHED() << "Must be inside Run to call Quit";
319 bool MessageLoop::IsType(Type type) const {
320 return type_ == type;
323 static void QuitCurrentWhenIdle() {
324 MessageLoop::current()->QuitWhenIdle();
327 // static
328 Closure MessageLoop::QuitWhenIdleClosure() {
329 return Bind(&QuitCurrentWhenIdle);
332 void MessageLoop::SetNestableTasksAllowed(bool allowed) {
333 if (allowed) {
334 // Kick the native pump just in case we enter a OS-driven nested message
335 // loop.
336 pump_->ScheduleWork();
338 nestable_tasks_allowed_ = allowed;
341 bool MessageLoop::NestableTasksAllowed() const {
342 return nestable_tasks_allowed_;
345 bool MessageLoop::IsNested() {
346 return run_loop_->run_depth_ > 1;
349 void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
350 DCHECK_EQ(this, current());
351 task_observers_.AddObserver(task_observer);
354 void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
355 DCHECK_EQ(this, current());
356 task_observers_.RemoveObserver(task_observer);
359 bool MessageLoop::is_running() const {
360 DCHECK_EQ(this, current());
361 return run_loop_ != NULL;
364 bool MessageLoop::IsHighResolutionTimerEnabledForTesting() {
365 return incoming_task_queue_->IsHighResolutionTimerEnabledForTesting();
368 bool MessageLoop::IsIdleForTesting() {
369 // We only check the imcoming queue|, since we don't want to lock the work
370 // queue.
371 return incoming_task_queue_->IsIdleForTesting();
374 //------------------------------------------------------------------------------
376 void MessageLoop::Init() {
377 DCHECK(!current()) << "should only have one message loop per thread";
378 lazy_tls_ptr.Pointer()->Set(this);
380 incoming_task_queue_ = new internal::IncomingTaskQueue(this);
381 message_loop_proxy_ =
382 new internal::MessageLoopProxyImpl(incoming_task_queue_);
383 thread_task_runner_handle_.reset(
384 new ThreadTaskRunnerHandle(message_loop_proxy_));
387 void MessageLoop::RunHandler() {
388 DCHECK_EQ(this, current());
390 StartHistogrammer();
392 #if defined(OS_WIN)
393 if (run_loop_->dispatcher_ && type() == TYPE_UI) {
394 static_cast<MessagePumpForUI*>(pump_.get())->
395 RunWithDispatcher(this, run_loop_->dispatcher_);
396 return;
398 #endif
400 pump_->Run(this);
403 bool MessageLoop::ProcessNextDelayedNonNestableTask() {
404 if (run_loop_->run_depth_ != 1)
405 return false;
407 if (deferred_non_nestable_work_queue_.empty())
408 return false;
410 PendingTask pending_task = deferred_non_nestable_work_queue_.front();
411 deferred_non_nestable_work_queue_.pop();
413 RunTask(pending_task);
414 return true;
417 void MessageLoop::RunTask(const PendingTask& pending_task) {
418 tracked_objects::TrackedTime start_time =
419 tracked_objects::ThreadData::NowForStartOfRun(pending_task.birth_tally);
421 TRACE_EVENT_FLOW_END1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
422 "MessageLoop::PostTask", TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
423 "queue_duration",
424 (start_time - pending_task.EffectiveTimePosted()).InMilliseconds());
425 // When tracing memory for posted tasks it's more valuable to attribute the
426 // memory allocations to the source function than generically to "RunTask".
427 TRACE_EVENT_WITH_MEMORY_TAG2(
428 "toplevel", "MessageLoop::RunTask",
429 pending_task.posted_from.function_name(), // Name for memory tracking.
430 "src_file", pending_task.posted_from.file_name(),
431 "src_func", pending_task.posted_from.function_name());
433 DCHECK(nestable_tasks_allowed_);
434 // Execute the task and assume the worst: It is probably not reentrant.
435 nestable_tasks_allowed_ = false;
437 // Before running the task, store the program counter where it was posted
438 // and deliberately alias it to ensure it is on the stack if the task
439 // crashes. Be careful not to assume that the variable itself will have the
440 // expected value when displayed by the optimizer in an optimized build.
441 // Look at a memory dump of the stack.
442 const void* program_counter =
443 pending_task.posted_from.program_counter();
444 debug::Alias(&program_counter);
446 HistogramEvent(kTaskRunEvent);
448 FOR_EACH_OBSERVER(TaskObserver, task_observers_,
449 WillProcessTask(pending_task));
450 pending_task.task.Run();
451 FOR_EACH_OBSERVER(TaskObserver, task_observers_,
452 DidProcessTask(pending_task));
454 tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
455 start_time, tracked_objects::ThreadData::NowForEndOfRun());
457 nestable_tasks_allowed_ = true;
460 bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
461 if (pending_task.nestable || run_loop_->run_depth_ == 1) {
462 RunTask(pending_task);
463 // Show that we ran a task (Note: a new one might arrive as a
464 // consequence!).
465 return true;
468 // We couldn't run the task now because we're in a nested message loop
469 // and the task isn't nestable.
470 deferred_non_nestable_work_queue_.push(pending_task);
471 return false;
474 void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
475 // Move to the delayed work queue.
476 delayed_work_queue_.push(pending_task);
479 bool MessageLoop::DeletePendingTasks() {
480 bool did_work = !work_queue_.empty();
481 while (!work_queue_.empty()) {
482 PendingTask pending_task = work_queue_.front();
483 work_queue_.pop();
484 if (!pending_task.delayed_run_time.is_null()) {
485 // We want to delete delayed tasks in the same order in which they would
486 // normally be deleted in case of any funny dependencies between delayed
487 // tasks.
488 AddToDelayedWorkQueue(pending_task);
491 did_work |= !deferred_non_nestable_work_queue_.empty();
492 while (!deferred_non_nestable_work_queue_.empty()) {
493 deferred_non_nestable_work_queue_.pop();
495 did_work |= !delayed_work_queue_.empty();
497 // Historically, we always delete the task regardless of valgrind status. It's
498 // not completely clear why we want to leak them in the loops above. This
499 // code is replicating legacy behavior, and should not be considered
500 // absolutely "correct" behavior. See TODO above about deleting all tasks
501 // when it's safe.
502 while (!delayed_work_queue_.empty()) {
503 delayed_work_queue_.pop();
505 return did_work;
508 uint64 MessageLoop::GetTaskTraceID(const PendingTask& task) {
509 return (static_cast<uint64>(task.sequence_num) << 32) |
510 ((static_cast<uint64>(reinterpret_cast<intptr_t>(this)) << 32) >> 32);
513 void MessageLoop::ReloadWorkQueue() {
514 // We can improve performance of our loading tasks from the incoming queue to
515 // |*work_queue| by waiting until the last minute (|*work_queue| is empty) to
516 // load. That reduces the number of locks-per-task significantly when our
517 // queues get large.
518 if (work_queue_.empty())
519 incoming_task_queue_->ReloadWorkQueue(&work_queue_);
522 void MessageLoop::ScheduleWork(bool was_empty) {
523 // The Android UI message loop needs to get notified each time
524 // a task is added to the incoming queue.
525 if (was_empty || AlwaysNotifyPump(type_))
526 pump_->ScheduleWork();
529 //------------------------------------------------------------------------------
530 // Method and data for histogramming events and actions taken by each instance
531 // on each thread.
533 void MessageLoop::StartHistogrammer() {
534 #if !defined(OS_NACL) // NaCl build has no metrics code.
535 if (enable_histogrammer_ && !message_histogram_
536 && StatisticsRecorder::IsActive()) {
537 DCHECK(!thread_name_.empty());
538 message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
539 "MsgLoop:" + thread_name_,
540 kLeastNonZeroMessageId, kMaxMessageId,
541 kNumberOfDistinctMessagesDisplayed,
542 message_histogram_->kHexRangePrintingFlag,
543 event_descriptions_);
545 #endif
548 void MessageLoop::HistogramEvent(int event) {
549 #if !defined(OS_NACL)
550 if (message_histogram_)
551 message_histogram_->Add(event);
552 #endif
555 bool MessageLoop::DoWork() {
556 if (!nestable_tasks_allowed_) {
557 // Task can't be executed right now.
558 return false;
561 for (;;) {
562 ReloadWorkQueue();
563 if (work_queue_.empty())
564 break;
566 // Execute oldest task.
567 do {
568 PendingTask pending_task = work_queue_.front();
569 work_queue_.pop();
570 if (!pending_task.delayed_run_time.is_null()) {
571 AddToDelayedWorkQueue(pending_task);
572 // If we changed the topmost task, then it is time to reschedule.
573 if (delayed_work_queue_.top().task.Equals(pending_task.task))
574 pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
575 } else {
576 if (DeferOrRunPendingTask(pending_task))
577 return true;
579 } while (!work_queue_.empty());
582 // Nothing happened.
583 return false;
586 bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
587 if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
588 recent_time_ = *next_delayed_work_time = TimeTicks();
589 return false;
592 // When we "fall behind," there will be a lot of tasks in the delayed work
593 // queue that are ready to run. To increase efficiency when we fall behind,
594 // we will only call Time::Now() intermittently, and then process all tasks
595 // that are ready to run before calling it again. As a result, the more we
596 // fall behind (and have a lot of ready-to-run delayed tasks), the more
597 // efficient we'll be at handling the tasks.
599 TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
600 if (next_run_time > recent_time_) {
601 recent_time_ = TimeTicks::Now(); // Get a better view of Now();
602 if (next_run_time > recent_time_) {
603 *next_delayed_work_time = next_run_time;
604 return false;
608 PendingTask pending_task = delayed_work_queue_.top();
609 delayed_work_queue_.pop();
611 if (!delayed_work_queue_.empty())
612 *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;
614 return DeferOrRunPendingTask(pending_task);
617 bool MessageLoop::DoIdleWork() {
618 if (ProcessNextDelayedNonNestableTask())
619 return true;
621 if (run_loop_->quit_when_idle_received_)
622 pump_->Quit();
624 return false;
627 void MessageLoop::GetQueueingInformation(size_t* queue_size,
628 TimeDelta* queueing_delay) {
629 *queue_size = work_queue_.size();
630 if (*queue_size == 0) {
631 *queueing_delay = TimeDelta();
632 return;
635 const PendingTask& next_to_run = work_queue_.front();
636 tracked_objects::Duration duration =
637 tracked_objects::TrackedTime::Now() - next_to_run.EffectiveTimePosted();
638 *queueing_delay = TimeDelta::FromMilliseconds(duration.InMilliseconds());
641 void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
642 void(*deleter)(const void*),
643 const void* object) {
644 PostNonNestableTask(from_here, Bind(deleter, object));
647 void MessageLoop::ReleaseSoonInternal(
648 const tracked_objects::Location& from_here,
649 void(*releaser)(const void*),
650 const void* object) {
651 PostNonNestableTask(from_here, Bind(releaser, object));
#if !defined(OS_NACL)

//------------------------------------------------------------------------------
// MessageLoopForUI

#if defined(OS_ANDROID)
void MessageLoopForUI::Start() {
  // No Histogram support for UI message loop as it is managed by Java side
  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
#endif

#if defined(OS_IOS)
void MessageLoopForUI::Attach() {
  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif

#if defined(OS_WIN)
void MessageLoopForUI::AddObserver(Observer* observer) {
  static_cast<MessagePumpWin*>(pump_.get())->AddObserver(observer);
}

void MessageLoopForUI::RemoveObserver(Observer* observer) {
  static_cast<MessagePumpWin*>(pump_.get())->RemoveObserver(observer);
}
#endif  // defined(OS_WIN)

#if defined(USE_OZONE) || (defined(OS_CHROMEOS) && !defined(USE_GLIB))
// Forwards file-descriptor watching straight to the libevent pump.
bool MessageLoopForUI::WatchFileDescriptor(
    int fd,
    bool persistent,
    MessagePumpLibevent::Mode mode,
    MessagePumpLibevent::FileDescriptorWatcher *controller,
    MessagePumpLibevent::Watcher *delegate) {
  return static_cast<MessagePumpLibevent*>(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif

#endif  // !defined(OS_NACL)
699 //------------------------------------------------------------------------------
700 // MessageLoopForIO
702 #if !defined(OS_NACL)
703 void MessageLoopForIO::AddIOObserver(
704 MessageLoopForIO::IOObserver* io_observer) {
705 ToPumpIO(pump_.get())->AddIOObserver(io_observer);
708 void MessageLoopForIO::RemoveIOObserver(
709 MessageLoopForIO::IOObserver* io_observer) {
710 ToPumpIO(pump_.get())->RemoveIOObserver(io_observer);
713 #if defined(OS_WIN)
714 void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
715 ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
718 bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
719 return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
722 bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
723 return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
725 #elif defined(OS_POSIX)
726 bool MessageLoopForIO::WatchFileDescriptor(int fd,
727 bool persistent,
728 Mode mode,
729 FileDescriptorWatcher *controller,
730 Watcher *delegate) {
731 return ToPumpIO(pump_.get())->WatchFileDescriptor(
733 persistent,
734 mode,
735 controller,
736 delegate);
738 #endif
740 #endif // !defined(OS_NACL)
742 } // namespace base