Fix clank compilation errors in the relocation_packer.
[chromium-blink-merge.git] / base / message_loop / message_loop.cc
blobccece4d38ca1d4acb3d9810b9eb7ba9d646a58a4
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/message_loop/message_loop.h"
7 #include <algorithm>
9 #include "base/bind.h"
10 #include "base/compiler_specific.h"
11 #include "base/debug/alias.h"
12 #include "base/debug/trace_event.h"
13 #include "base/lazy_instance.h"
14 #include "base/logging.h"
15 #include "base/memory/scoped_ptr.h"
16 #include "base/message_loop/message_pump_default.h"
17 #include "base/metrics/histogram.h"
18 #include "base/metrics/statistics_recorder.h"
19 #include "base/run_loop.h"
20 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
21 #include "base/thread_task_runner_handle.h"
22 #include "base/threading/thread_local.h"
23 #include "base/time/time.h"
24 #include "base/tracked_objects.h"
26 #if defined(OS_MACOSX)
27 #include "base/message_loop/message_pump_mac.h"
28 #endif
29 #if defined(OS_POSIX) && !defined(OS_IOS)
30 #include "base/message_loop/message_pump_libevent.h"
31 #endif
32 #if defined(OS_ANDROID)
33 #include "base/message_loop/message_pump_android.h"
34 #endif
35 #if defined(USE_GLIB)
36 #include "base/message_loop/message_pump_glib.h"
37 #endif
39 namespace base {
41 namespace {
// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists. This should be safe and free of static constructors.
LazyInstance<base::ThreadLocalPointer<MessageLoop> >::Leaky lazy_tls_ptr =
    LAZY_INSTANCE_INITIALIZER;

// Logical events for Histogram profiling. Run with -message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
const int kTaskRunEvent = 0x1;
#if !defined(OS_NACL)
const int kTimerEvent = 0x2;

// Provide range of message IDs for use in histogramming and debug display.
const int kLeastNonZeroMessageId = 1;
const int kMaxMessageId = 1099;
const int kNumberOfDistinctMessagesDisplayed = 1100;
// Provide a macro that takes an expression (such as a constant, or macro
// constant) and creates a pair to initialize an array of pairs. In this case,
// our pair consists of the expression's value, and the "stringized" version
// of the expression (i.e., the expression put in quotes). For example, if
// we have:
64 // #define FOO 2
65 // #define BAR 5
66 // then the following:
67 // VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
68 // will expand to:
69 // {7, "FOO + BAR"}
70 // We use the resulting array as an argument to our histogram, which reads the
71 // number as a bucket identifier, and proceeds to use the corresponding name
72 // in the pair (i.e., the quoted string) when printing out a histogram.
#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},

// Bucket/name pairs fed to the histogram below; see the macro comment above.
const LinearHistogram::DescriptionPair event_descriptions_[] = {
  // Provide some pretty print capability in our histogram for our internal
  // messages.

  // A few events we handle (kindred to messages), and used to profile actions.
  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)

  {-1, NULL}  // The list must be null terminated, per API to histogram.
};
#endif  // !defined(OS_NACL)

// Whether StartHistogrammer() should create per-thread histograms; set via
// MessageLoop::EnableHistogrammer().
bool enable_histogrammer_ = false;

// Optional factory that overrides the pump used for TYPE_UI loops; set once
// via MessageLoop::InitMessagePumpForUIFactory().
MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;
91 // Returns true if MessagePump::ScheduleWork() must be called one
92 // time for every task that is added to the MessageLoop incoming queue.
93 bool AlwaysNotifyPump(MessageLoop::Type type) {
94 #if defined(OS_ANDROID)
95 return type == MessageLoop::TYPE_UI || type == MessageLoop::TYPE_JAVA;
96 #else
97 return false;
98 #endif
// Pick the MessagePump implementation backing TYPE_IO loops on this platform.
#if defined(OS_IOS)
typedef MessagePumpIOSForIO MessagePumpForIO;
#elif defined(OS_NACL)
typedef MessagePumpDefault MessagePumpForIO;
#elif defined(OS_POSIX)
typedef MessagePumpLibevent MessagePumpForIO;
#endif

// Downcasts a loop's pump to the platform IO pump type. Only meaningful when
// the pump really is the IO pump (i.e. the loop is TYPE_IO).
MessagePumpForIO* ToPumpIO(MessagePump* pump) {
  return static_cast<MessagePumpForIO*>(pump);
}
113 } // namespace
115 //------------------------------------------------------------------------------
// Trivial out-of-line definitions for the observer interfaces.
MessageLoop::TaskObserver::TaskObserver() {
}

MessageLoop::TaskObserver::~TaskObserver() {
}

MessageLoop::DestructionObserver::~DestructionObserver() {
}
126 //------------------------------------------------------------------------------
// Builds a loop of one of the built-in types. Init() runs first so the loop
// is registered as current() before the pump is created.
MessageLoop::MessageLoop(Type type)
    : type_(type),
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      message_histogram_(NULL),
      run_loop_(NULL) {
  Init();

  pump_ = CreateMessagePumpForType(type).Pass();
}
// Builds a TYPE_CUSTOM loop around a caller-supplied pump; ownership of the
// pump transfers to the loop.
MessageLoop::MessageLoop(scoped_ptr<MessagePump> pump)
    : pump_(pump.Pass()),
      type_(TYPE_CUSTOM),
      nestable_tasks_allowed_(true),
#if defined(OS_WIN)
      os_modal_loop_(false),
#endif  // OS_WIN
      message_histogram_(NULL),
      run_loop_(NULL) {
  DCHECK(pump_.get());
  Init();
}
// Tears the loop down on its own thread: drains pending tasks (bounded),
// notifies DestructionObservers, detaches the incoming queue, and finally
// unregisters the loop from thread-local storage.
MessageLoop::~MessageLoop() {
  DCHECK_EQ(this, current());

  DCHECK(!run_loop_);

  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon). We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks. Normally, we should only pass through this loop once or twice. If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn. Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }
  DCHECK(!did_work);

  // Let interested parties have one last shot at accessing this.
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    WillDestroyCurrentMessageLoop());

  thread_task_runner_handle_.reset();

  // Tell the incoming queue that we are dying.
  incoming_task_queue_->WillDestroyCurrentMessageLoop();
  incoming_task_queue_ = NULL;
  message_loop_proxy_ = NULL;

  // OK, now make it so that no one can find us.
  lazy_tls_ptr.Pointer()->Set(NULL);
}
// static
// Returns the MessageLoop registered for the calling thread, or NULL if the
// thread has none.
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

// static
// Globally toggles per-thread task histogramming (see StartHistogrammer()).
void MessageLoop::EnableHistogrammer(bool enable) {
  enable_histogrammer_ = enable;
}
204 // static
205 bool MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
206 if (message_pump_for_ui_factory_)
207 return false;
209 message_pump_for_ui_factory_ = factory;
210 return true;
// static
// Creates the platform-appropriate pump for the requested loop type. The
// TYPE_UI pump may be replaced process-wide via InitMessagePumpForUIFactory().
scoped_ptr<MessagePump> MessageLoop::CreateMessagePumpForType(Type type) {
// TODO(rvargas): Get rid of the OS guards.
#if defined(USE_GLIB) && !defined(OS_NACL)
  typedef MessagePumpGlib MessagePumpForUI;
#elif defined(OS_LINUX) && !defined(OS_NACL)
  typedef MessagePumpLibevent MessagePumpForUI;
#endif

#if defined(OS_IOS) || defined(OS_MACOSX)
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(MessagePumpMac::Create())
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI MessageLoop.
// TODO(abarth): Figure out if we need this.
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>()
#else
#define MESSAGE_PUMP_UI scoped_ptr<MessagePump>(new MessagePumpForUI())
#endif

#if defined(OS_MACOSX)
  // Use an OS native runloop on Mac to support timer coalescing.
#define MESSAGE_PUMP_DEFAULT \
    scoped_ptr<MessagePump>(new MessagePumpCFRunLoop())
#else
#define MESSAGE_PUMP_DEFAULT scoped_ptr<MessagePump>(new MessagePumpDefault())
#endif

  if (type == MessageLoop::TYPE_UI) {
    if (message_pump_for_ui_factory_)
      return message_pump_for_ui_factory_();
    return MESSAGE_PUMP_UI;
  }
  if (type == MessageLoop::TYPE_IO)
    return scoped_ptr<MessagePump>(new MessagePumpForIO());

#if defined(OS_ANDROID)
  if (type == MessageLoop::TYPE_JAVA)
    return scoped_ptr<MessagePump>(new MessagePumpForUI());
#endif

  DCHECK_EQ(MessageLoop::TYPE_DEFAULT, type);
  return MESSAGE_PUMP_DEFAULT;
}
// Registers an observer to be notified from ~MessageLoop(). Must be called on
// the loop's own thread.
void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

// Unregisters a destruction observer. Must be called on the loop's thread.
void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}
// The four Post* variants below all funnel into the incoming queue; the last
// two arguments select the delay and whether the task may run from a nested
// run loop (nestable == true).
void MessageLoop::PostTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, TimeDelta(), true);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, delay, true);
}

void MessageLoop::PostNonNestableTask(
    const tracked_objects::Location& from_here,
    const Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, TimeDelta(), false);
}

void MessageLoop::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here,
    const Closure& task,
    TimeDelta delay) {
  DCHECK(!task.is_null()) << from_here.ToString();
  incoming_task_queue_->AddToIncomingQueue(from_here, task, delay, false);
}
// Runs the loop until Quit is called (via a nested RunLoop).
void MessageLoop::Run() {
  RunLoop run_loop;
  run_loop.Run();
}

// Runs the loop until there is no more work, then returns.
void MessageLoop::RunUntilIdle() {
  RunLoop run_loop;
  run_loop.RunUntilIdle();
}
309 void MessageLoop::QuitWhenIdle() {
310 DCHECK_EQ(this, current());
311 if (run_loop_) {
312 run_loop_->quit_when_idle_received_ = true;
313 } else {
314 NOTREACHED() << "Must be inside Run to call Quit";
318 void MessageLoop::QuitNow() {
319 DCHECK_EQ(this, current());
320 if (run_loop_) {
321 pump_->Quit();
322 } else {
323 NOTREACHED() << "Must be inside Run to call Quit";
bool MessageLoop::IsType(Type type) const {
  return type_ == type;
}

// File-local helper bound by QuitWhenIdleClosure(); quits whichever loop is
// current when the closure eventually runs.
static void QuitCurrentWhenIdle() {
  MessageLoop::current()->QuitWhenIdle();
}

// static
Closure MessageLoop::QuitWhenIdleClosure() {
  return Bind(&QuitCurrentWhenIdle);
}
// Enables/disables running nestable tasks; when enabling, kicks the pump so
// any already-queued work is noticed by an OS-driven nested loop.
void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (allowed) {
    // Kick the native pump just in case we enter a OS-driven nested message
    // loop.
    pump_->ScheduleWork();
  }
  nestable_tasks_allowed_ = allowed;
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}
// True when called from within a nested (depth > 1) run loop.
bool MessageLoop::IsNested() {
  return run_loop_->run_depth_ > 1;
}

void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.AddObserver(task_observer);
}

void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.RemoveObserver(task_observer);
}

// True while a RunLoop is active on this loop.
bool MessageLoop::is_running() const {
  DCHECK_EQ(this, current());
  return run_loop_ != NULL;
}

bool MessageLoop::IsHighResolutionTimerEnabledForTesting() {
  return incoming_task_queue_->IsHighResolutionTimerEnabledForTesting();
}

bool MessageLoop::IsIdleForTesting() {
  // We only check the incoming queue, since we don't want to lock the work
  // queue.
  return incoming_task_queue_->IsIdleForTesting();
}
382 //------------------------------------------------------------------------------
// Registers this loop in TLS and wires up the incoming queue, loop proxy and
// thread task runner handle. Called from both constructors before any tasks
// can be posted.
void MessageLoop::Init() {
  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);

  incoming_task_queue_ = new internal::IncomingTaskQueue(this);
  message_loop_proxy_ =
      new internal::MessageLoopProxyImpl(incoming_task_queue_);
  thread_task_runner_handle_.reset(
      new ThreadTaskRunnerHandle(message_loop_proxy_));
}
// Entry point used by RunLoop: starts histogramming and hands control to the
// pump (or, on Windows UI loops with a dispatcher, to RunWithDispatcher).
void MessageLoop::RunHandler() {
  DCHECK_EQ(this, current());

  StartHistogrammer();

#if defined(OS_WIN)
  if (run_loop_->dispatcher_ && type() == TYPE_UI) {
    static_cast<MessagePumpForUI*>(pump_.get())->
        RunWithDispatcher(this, run_loop_->dispatcher_);
    return;
  }
#endif

  pump_->Run(this);
}
411 bool MessageLoop::ProcessNextDelayedNonNestableTask() {
412 if (run_loop_->run_depth_ != 1)
413 return false;
415 if (deferred_non_nestable_work_queue_.empty())
416 return false;
418 PendingTask pending_task = deferred_non_nestable_work_queue_.front();
419 deferred_non_nestable_work_queue_.pop();
421 RunTask(pending_task);
422 return true;
// Executes one task: emits tracing, records histogram/profiler data, notifies
// TaskObservers around the run, and blocks nested task execution for the
// task's duration.
void MessageLoop::RunTask(const PendingTask& pending_task) {
  tracked_objects::TrackedTime start_time =
      tracked_objects::ThreadData::NowForStartOfRun(pending_task.birth_tally);

  TRACE_EVENT_FLOW_END1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
      "MessageLoop::PostTask", TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
      "queue_duration",
      (start_time - pending_task.EffectiveTimePosted()).InMilliseconds());
  // When tracing memory for posted tasks it's more valuable to attribute the
  // memory allocations to the source function than generically to "RunTask".
  TRACE_EVENT_WITH_MEMORY_TAG2(
      "toplevel", "MessageLoop::RunTask",
      pending_task.posted_from.function_name(),  // Name for memory tracking.
      "src_file", pending_task.posted_from.file_name(),
      "src_func", pending_task.posted_from.function_name());

  DCHECK(nestable_tasks_allowed_);
  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  // Before running the task, store the program counter where it was posted
  // and deliberately alias it to ensure it is on the stack if the task
  // crashes. Be careful not to assume that the variable itself will have the
  // expected value when displayed by the optimizer in an optimized build.
  // Look at a memory dump of the stack.
  const void* program_counter =
      pending_task.posted_from.program_counter();
  debug::Alias(&program_counter);

  HistogramEvent(kTaskRunEvent);

  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    WillProcessTask(pending_task));
  pending_task.task.Run();
  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    DidProcessTask(pending_task));

  tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
      start_time, tracked_objects::ThreadData::NowForEndOfRun());

  nestable_tasks_allowed_ = true;
}
468 bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
469 if (pending_task.nestable || run_loop_->run_depth_ == 1) {
470 RunTask(pending_task);
471 // Show that we ran a task (Note: a new one might arrive as a
472 // consequence!).
473 return true;
476 // We couldn't run the task now because we're in a nested message loop
477 // and the task isn't nestable.
478 deferred_non_nestable_work_queue_.push(pending_task);
479 return false;
// Moves a task whose delayed_run_time is set onto the delayed work queue.
void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
  // Move to the delayed work queue.
  delayed_work_queue_.push(pending_task);
}
// Drains all three task queues during shutdown. Returns true if anything was
// actually pending (the destructor loops on this to catch tasks spawned by
// task destruction, e.g. DeleteSoon chains).
bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = work_queue_.front();
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(pending_task);
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    deferred_non_nestable_work_queue_.pop();
  }
  did_work |= !delayed_work_queue_.empty();

  // Historically, we always delete the task regardless of valgrind status. It's
  // not completely clear why we want to leak them in the loops above. This
  // code is replicating legacy behavior, and should not be considered
  // absolutely "correct" behavior. See TODO above about deleting all tasks
  // when it's safe.
  while (!delayed_work_queue_.empty()) {
    delayed_work_queue_.pop();
  }
  return did_work;
}
516 uint64 MessageLoop::GetTaskTraceID(const PendingTask& task) {
517 return (static_cast<uint64>(task.sequence_num) << 32) |
518 ((static_cast<uint64>(reinterpret_cast<intptr_t>(this)) << 32) >> 32);
void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from the incoming queue to
  // |*work_queue| by waiting until the last minute (|*work_queue| is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
  if (work_queue_.empty())
    incoming_task_queue_->ReloadWorkQueue(&work_queue_);
}
// Wakes the pump when a task lands in an empty incoming queue, or on every
// task for loop types that require it (see AlwaysNotifyPump()).
void MessageLoop::ScheduleWork(bool was_empty) {
  // The Android UI message loop needs to get notified each time
  // a task is added to the incoming queue.
  if (was_empty || AlwaysNotifyPump(type_))
    pump_->ScheduleWork();
}
537 //------------------------------------------------------------------------------
538 // Method and data for histogramming events and actions taken by each instance
539 // on each thread.
541 void MessageLoop::StartHistogrammer() {
542 #if !defined(OS_NACL) // NaCl build has no metrics code.
543 if (enable_histogrammer_ && !message_histogram_
544 && StatisticsRecorder::IsActive()) {
545 DCHECK(!thread_name_.empty());
546 message_histogram_ = LinearHistogram::FactoryGetWithRangeDescription(
547 "MsgLoop:" + thread_name_,
548 kLeastNonZeroMessageId, kMaxMessageId,
549 kNumberOfDistinctMessagesDisplayed,
550 message_histogram_->kHexRangePrintingFlag,
551 event_descriptions_);
553 #endif
// Records one logical event (e.g. kTaskRunEvent) in this thread's histogram,
// if StartHistogrammer() created one.
void MessageLoop::HistogramEvent(int event) {
#if !defined(OS_NACL)
  if (message_histogram_)
    message_histogram_->Add(event);
#endif
}
// MessagePump::Delegate implementation: runs at most one immediate task.
// Delayed tasks encountered here are rerouted to the delayed queue and the
// pump is rescheduled if the soonest deadline changed.
bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = work_queue_.front();
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        AddToDelayedWorkQueue(pending_task);
        // If we changed the topmost task, then it is time to reschedule.
        if (delayed_work_queue_.top().task.Equals(pending_task.task))
          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(pending_task))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}
// MessagePump::Delegate implementation: runs at most one ripe delayed task
// and reports the next deadline (or a null TimeTicks when there is none).
bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind," there will be a lot of tasks in the delayed work
  // queue that are ready to run. To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again. As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task = delayed_work_queue_.top();
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(pending_task);
}
// MessagePump::Delegate implementation: gives deferred non-nestable tasks a
// chance to run, and honors a pending quit-when-idle request.
bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  if (run_loop_->quit_when_idle_received_)
    pump_->Quit();

  return false;
}
// Reports the current work-queue depth and how long the task at its head has
// been waiting; the delay is zero when the queue is empty.
void MessageLoop::GetQueueingInformation(size_t* queue_size,
                                         TimeDelta* queueing_delay) {
  *queue_size = work_queue_.size();
  if (*queue_size == 0) {
    *queueing_delay = TimeDelta();
    return;
  }

  const PendingTask& next_to_run = work_queue_.front();
  tracked_objects::Duration duration =
      tracked_objects::TrackedTime::Now() - next_to_run.EffectiveTimePosted();
  *queueing_delay = TimeDelta::FromMilliseconds(duration.InMilliseconds());
}
// Backends for DeleteSoon()/ReleaseSoon(): post a non-nestable task that
// invokes the type-erased deleter/releaser on |object|.
void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                     void(*deleter)(const void*),
                                     const void* object) {
  PostNonNestableTask(from_here, Bind(deleter, object));
}

void MessageLoop::ReleaseSoonInternal(
    const tracked_objects::Location& from_here,
    void(*releaser)(const void*),
    const void* object) {
  PostNonNestableTask(from_here, Bind(releaser, object));
}
662 #if !defined(OS_NACL)
663 //------------------------------------------------------------------------------
664 // MessageLoopForUI
#if defined(OS_ANDROID)
// Hands control of this loop to the Java-side pump.
void MessageLoopForUI::Start() {
  // No Histogram support for UI message loop as it is managed by Java side
  static_cast<MessagePumpForUI*>(pump_.get())->Start(this);
}
#endif

#if defined(OS_IOS)
// Attaches this loop to the UIApplication's run loop.
void MessageLoopForUI::Attach() {
  static_cast<MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif

#if defined(OS_WIN)
void MessageLoopForUI::AddObserver(Observer* observer) {
  static_cast<MessagePumpWin*>(pump_.get())->AddObserver(observer);
}

void MessageLoopForUI::RemoveObserver(Observer* observer) {
  static_cast<MessagePumpWin*>(pump_.get())->RemoveObserver(observer);
}
#endif  // defined(OS_WIN)
#if defined(USE_OZONE) || (defined(OS_CHROMEOS) && !defined(USE_GLIB))
// Forwards file-descriptor watching to the libevent pump backing this UI
// loop. The forwarded call was missing its |fd| argument, which cannot
// compile against a five-parameter WatchFileDescriptor; restored here.
bool MessageLoopForUI::WatchFileDescriptor(
    int fd,
    bool persistent,
    MessagePumpLibevent::Mode mode,
    MessagePumpLibevent::FileDescriptorWatcher *controller,
    MessagePumpLibevent::Watcher *delegate) {
  return static_cast<MessagePumpLibevent*>(pump_.get())->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}
#endif
705 #endif // !defined(OS_NACL)
707 //------------------------------------------------------------------------------
708 // MessageLoopForIO
710 #if !defined(OS_NACL)
711 void MessageLoopForIO::AddIOObserver(
712 MessageLoopForIO::IOObserver* io_observer) {
713 ToPumpIO(pump_.get())->AddIOObserver(io_observer);
716 void MessageLoopForIO::RemoveIOObserver(
717 MessageLoopForIO::IOObserver* io_observer) {
718 ToPumpIO(pump_.get())->RemoveIOObserver(io_observer);
#if defined(OS_WIN)
// Thin forwards to the Windows IO-completion-port pump.
void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  ToPumpIO(pump_.get())->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
  return ToPumpIO(pump_.get())->RegisterJobObject(job, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return ToPumpIO(pump_.get())->WaitForIOCompletion(timeout, filter);
}
733 #elif defined(OS_POSIX)
734 bool MessageLoopForIO::WatchFileDescriptor(int fd,
735 bool persistent,
736 Mode mode,
737 FileDescriptorWatcher *controller,
738 Watcher *delegate) {
739 return ToPumpIO(pump_.get())->WatchFileDescriptor(
741 persistent,
742 mode,
743 controller,
744 delegate);
746 #endif
748 #endif // !defined(OS_NACL)
750 } // namespace base