// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/message_loop.h"

#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "base/debug/trace_event.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop_proxy_impl.h"
#include "base/message_pump_default.h"
#include "base/metrics/histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_local.h"
#include "base/time.h"
#include "base/tracked_objects.h"

#if defined(OS_MACOSX)
#include "base/message_pump_mac.h"
#endif
#if defined(OS_POSIX) && !defined(OS_IOS)
#include "base/message_pump_libevent.h"
#endif
#if defined(OS_ANDROID)
#include "base/message_pump_android.h"
#endif

#if defined(TOOLKIT_GTK)

using base::PendingTask;
using base::TimeDelta;
using base::TimeTicks;

// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists. This should be safe and free of static constructors.
base::LazyInstance<base::ThreadLocalPointer<MessageLoop> > lazy_tls_ptr =
    LAZY_INSTANCE_INITIALIZER;

// Logical events for Histogram profiling. Run with -message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
const int kTaskRunEvent = 0x1;
const int kTimerEvent = 0x2;

// Provide range of message IDs for use in histogramming and debug display.
const int kLeastNonZeroMessageId = 1;
const int kMaxMessageId = 1099;
const int kNumberOfDistinctMessagesDisplayed = 1100;

// Provide a macro that takes an expression (such as a constant, or macro
// constant) and creates a pair to initialize an array of pairs. In this case,
// our pair consists of the expression's value, and the "stringized" version
// of the expression (i.e., the expression put in quotes). For example, if
// we have:
//   #define FOO 2
//   #define BAR 5
// then the following:
//   VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
// will expand to:
//   {7, "FOO + BAR"},
// We use the resulting array as an argument to our histogram, which reads the
// number as a bucket identifier, and proceeds to use the corresponding name
// in the pair (i.e., the quoted string) when printing out a histogram.
#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},

const base::LinearHistogram::DescriptionPair event_descriptions_[] = {
  // Provide some pretty print capability in our histogram for our internal
  // messages.

  // A few events we handle (kindred to messages), and used to profile actions.
  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)

  {-1, NULL}  // The list must be null terminated, per API to histogram.
};

bool enable_histogrammer_ = false;

MessageLoop::MessagePumpFactory* message_pump_for_ui_factory_ = NULL;

// Create a process-wide unique ID to represent this task in trace events. This
// will be mangled with a Process ID hash to reduce the likelihood of colliding
// with MessageLoop pointers on other processes.
uint64 GetTaskTraceID(const PendingTask& task, MessageLoop* loop) {
  return (static_cast<uint64>(task.sequence_num) << 32) |
         static_cast<uint64>(reinterpret_cast<intptr_t>(loop));
}
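
// For illustration: a task with sequence_num 3 posted to a loop whose pointer
// value happened to be 0x1000 would produce (3 << 32) | 0x1000 here, before
// TRACE_ID_MANGLE mixes in the process ID hash at the trace-event call sites.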

//------------------------------------------------------------------------------

#if defined(OS_WIN)

// Upon a SEH exception in this thread, it restores the original unhandled
// exception filter.
static int SEHFilter(LPTOP_LEVEL_EXCEPTION_FILTER old_filter) {
  ::SetUnhandledExceptionFilter(old_filter);
  return EXCEPTION_CONTINUE_SEARCH;
}

// Retrieves a pointer to the current unhandled exception filter. There
// is no standalone getter method.
static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() {
  LPTOP_LEVEL_EXCEPTION_FILTER top_filter = NULL;
  top_filter = ::SetUnhandledExceptionFilter(0);
  ::SetUnhandledExceptionFilter(top_filter);
  return top_filter;
}

#endif  // defined(OS_WIN)

//------------------------------------------------------------------------------

MessageLoop::TaskObserver::TaskObserver() {
}

MessageLoop::TaskObserver::~TaskObserver() {
}

MessageLoop::DestructionObserver::~DestructionObserver() {
}

//------------------------------------------------------------------------------

MessageLoop::MessageLoop(Type type)
    : type_(type),
      nestable_tasks_allowed_(true),
      exception_restoration_(false),
      message_histogram_(NULL),
      os_modal_loop_(false),
      next_sequence_num_(0) {
  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);

  message_loop_proxy_ = new base::MessageLoopProxyImpl();
  thread_task_runner_handle_.reset(
      new base::ThreadTaskRunnerHandle(message_loop_proxy_));

  // TODO(rvargas): Get rid of the OS guards.
#if defined(OS_WIN)
#define MESSAGE_PUMP_UI new base::MessagePumpForUI()
#define MESSAGE_PUMP_IO new base::MessagePumpForIO()
#elif defined(OS_IOS)
#define MESSAGE_PUMP_UI base::MessagePumpMac::Create()
#define MESSAGE_PUMP_IO new base::MessagePumpIOSForIO()
#elif defined(OS_MACOSX)
#define MESSAGE_PUMP_UI base::MessagePumpMac::Create()
#define MESSAGE_PUMP_IO new base::MessagePumpLibevent()
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI MessageLoop.
// TODO(abarth): Figure out if we need this.
#define MESSAGE_PUMP_UI NULL
// ipc_channel_nacl.cc uses a worker thread to do socket reads currently, and
// doesn't require extra support for watching file descriptors.
#define MESSAGE_PUMP_IO new base::MessagePumpDefault();
#elif defined(OS_POSIX)  // POSIX but not MACOSX.
#define MESSAGE_PUMP_UI new base::MessagePumpForUI()
#define MESSAGE_PUMP_IO new base::MessagePumpLibevent()
#else
#error Not implemented
#endif

  if (type_ == TYPE_UI) {
    if (message_pump_for_ui_factory_)
      pump_ = message_pump_for_ui_factory_();
    else
      pump_ = MESSAGE_PUMP_UI;
  } else if (type_ == TYPE_IO) {
    pump_ = MESSAGE_PUMP_IO;
  } else {
    DCHECK_EQ(TYPE_DEFAULT, type_);
    pump_ = new base::MessagePumpDefault();
  }
}

MessageLoop::~MessageLoop() {
  DCHECK_EQ(this, current());

  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon). We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks. Normally, we should only pass through this loop once or twice. If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn. Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }

  // Let interested parties have one last shot at accessing this.
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    WillDestroyCurrentMessageLoop());

  thread_task_runner_handle_.reset();

  // Tell the message_loop_proxy that we are dying.
  static_cast<base::MessageLoopProxyImpl*>(message_loop_proxy_.get())->
      WillDestroyCurrentMessageLoop();
  message_loop_proxy_ = NULL;

  // OK, now make it so that no one can find us.
  lazy_tls_ptr.Pointer()->Set(NULL);

  // If we left the high-resolution timer activated, deactivate it now.
  // Doing this is not-critical, it is mainly to make sure we track
  // the high resolution timer activations properly in our unit tests.
  if (!high_resolution_timer_expiration_.is_null()) {
    base::Time::ActivateHighResolutionTimer(false);
    high_resolution_timer_expiration_ = base::TimeTicks();
  }
}

MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

void MessageLoop::EnableHistogrammer(bool enable) {
  enable_histogrammer_ = enable;
}

void MessageLoop::InitMessagePumpForUIFactory(MessagePumpFactory* factory) {
  DCHECK(!message_pump_for_ui_factory_);
  message_pump_for_ui_factory_ = factory;
}

void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}

void MessageLoop::PostTask(
    const tracked_objects::Location& from_here, const base::Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  PendingTask pending_task(
      from_here, task, CalculateDelayedRuntime(TimeDelta()), true);
  AddToIncomingQueue(&pending_task);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    TimeDelta delay) {
  DCHECK(!task.is_null()) << from_here.ToString();
  PendingTask pending_task(
      from_here, task, CalculateDelayedRuntime(delay), true);
  AddToIncomingQueue(&pending_task);
}

void MessageLoop::PostNonNestableTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task) {
  DCHECK(!task.is_null()) << from_here.ToString();
  PendingTask pending_task(
      from_here, task, CalculateDelayedRuntime(TimeDelta()), false);
  AddToIncomingQueue(&pending_task);
}

void MessageLoop::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    TimeDelta delay) {
  DCHECK(!task.is_null()) << from_here.ToString();
  PendingTask pending_task(
      from_here, task, CalculateDelayedRuntime(delay), false);
  AddToIncomingQueue(&pending_task);
}
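
// Example (illustrative only): a typical caller posts work to this loop with
// something like
//   loop->PostDelayedTask(FROM_HERE, base::Bind(&SomeFunction),
//                         base::TimeDelta::FromSeconds(5));
// where SomeFunction is caller-supplied; the NonNestable variants additionally
// defer the task while a nested loop is running.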

void MessageLoop::Run() {
  base::RunLoop run_loop;
  run_loop.Run();
}

void MessageLoop::RunUntilIdle() {
  base::RunLoop run_loop;
  run_loop.RunUntilIdle();
}

void MessageLoop::QuitWhenIdle() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    run_loop_->quit_when_idle_received_ = true;
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

void MessageLoop::QuitNow() {
  DCHECK_EQ(this, current());
  if (run_loop_) {
    pump_->Quit();
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

bool MessageLoop::IsType(Type type) const {
  return type_ == type;
}

static void QuitCurrentWhenIdle() {
  MessageLoop::current()->QuitWhenIdle();
}

base::Closure MessageLoop::QuitWhenIdleClosure() {
  return base::Bind(&QuitCurrentWhenIdle);
}
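
// Example (illustrative): tests commonly post this closure so the loop stops
// once it runs out of work, e.g.
//   loop->PostTask(FROM_HERE, MessageLoop::QuitWhenIdleClosure());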

void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (nestable_tasks_allowed_ != allowed) {
    nestable_tasks_allowed_ = allowed;
    if (!nestable_tasks_allowed_)
      return;
    // Start the native pump if we are not already pumping.
    pump_->ScheduleWork();
  }
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

bool MessageLoop::IsNested() {
  return run_loop_->run_depth_ > 1;
}

void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.AddObserver(task_observer);
}

void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.RemoveObserver(task_observer);
}

void MessageLoop::AssertIdle() const {
  // We only check |incoming_queue_|, since we don't want to lock |work_queue_|.
  base::AutoLock lock(incoming_queue_lock_);
  DCHECK(incoming_queue_.empty());
}

bool MessageLoop::is_running() const {
  DCHECK_EQ(this, current());
  return run_loop_ != NULL;
}

//------------------------------------------------------------------------------

// Runs the loop in two different SEH modes:
// enable_SEH_restoration_ = false : any unhandled exception goes to the last
// one that calls SetUnhandledExceptionFilter().
// enable_SEH_restoration_ = true : any unhandled exception goes to the filter
// that existed before the loop was run.
void MessageLoop::RunHandler() {
#if defined(OS_WIN)
  if (exception_restoration_) {
    RunInternalInSEHFrame();
    return;
  }
#endif
  RunInternal();
}

#if defined(OS_WIN)
__declspec(noinline) void MessageLoop::RunInternalInSEHFrame() {
  LPTOP_LEVEL_EXCEPTION_FILTER current_filter = GetTopSEHFilter();
  __try {
    RunInternal();
  } __except(SEHFilter(current_filter)) {
  }
  return;
}
#endif  // defined(OS_WIN)

void MessageLoop::RunInternal() {
  DCHECK_EQ(this, current());

  StartHistogrammer();

#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
  if (run_loop_->dispatcher_ && type() == TYPE_UI) {
    static_cast<base::MessagePumpForUI*>(pump_.get())->
        RunWithDispatcher(this, run_loop_->dispatcher_);
    return;
  }
#endif

  pump_->Run(this);
}

bool MessageLoop::ProcessNextDelayedNonNestableTask() {
  if (run_loop_->run_depth_ != 1)
    return false;

  if (deferred_non_nestable_work_queue_.empty())
    return false;

  PendingTask pending_task = deferred_non_nestable_work_queue_.front();
  deferred_non_nestable_work_queue_.pop();

  RunTask(pending_task);
  return true;
}

void MessageLoop::RunTask(const PendingTask& pending_task) {
  TRACE_EVENT_FLOW_END0("task", "MessageLoop::PostTask",
      TRACE_ID_MANGLE(GetTaskTraceID(pending_task, this)));
  TRACE_EVENT2("task", "MessageLoop::RunTask",
               "src_file", pending_task.posted_from.file_name(),
               "src_func", pending_task.posted_from.function_name());
  DCHECK(nestable_tasks_allowed_);
  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  // Before running the task, store the program counter where it was posted
  // and deliberately alias it to ensure it is on the stack if the task
  // crashes. Be careful not to assume that the variable itself will have the
  // expected value when displayed by the optimizer in an optimized build.
  // Look at a memory dump of the stack.
  const void* program_counter =
      pending_task.posted_from.program_counter();
  base::debug::Alias(&program_counter);

  HistogramEvent(kTaskRunEvent);

  tracked_objects::TrackedTime start_time =
      tracked_objects::ThreadData::NowForStartOfRun(pending_task.birth_tally);

  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    WillProcessTask(pending_task.time_posted));
  pending_task.task.Run();
  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    DidProcessTask(pending_task.time_posted));

  tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(pending_task,
      start_time, tracked_objects::ThreadData::NowForEndOfRun());

  nestable_tasks_allowed_ = true;
}

bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
  if (pending_task.nestable || run_loop_->run_depth_ == 1) {
    RunTask(pending_task);
    // Show that we ran a task (Note: a new one might arrive as a
    // consequence!).
    return true;
  }

  // We couldn't run the task now because we're in a nested message loop
  // and the task isn't nestable.
  deferred_non_nestable_work_queue_.push(pending_task);
  return false;
}

void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
  // Move to the delayed work queue.
  delayed_work_queue_.push(pending_task);
}

void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from incoming_queue_ to
  // work_queue_ by waiting until the last minute (work_queue_ is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
  if (!work_queue_.empty())
    return;  // Wait till we *really* need to lock and load.

  // Acquire all we can from the inter-thread queue with one lock acquisition.
  {
    base::AutoLock lock(incoming_queue_lock_);
    if (incoming_queue_.empty())
      return;
    incoming_queue_.Swap(&work_queue_);  // Constant time
    DCHECK(incoming_queue_.empty());
  }
}

bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = work_queue_.front();
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(pending_task);
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    deferred_non_nestable_work_queue_.pop();
  }
  did_work |= !delayed_work_queue_.empty();

  // Historically, we always delete the task regardless of valgrind status. It's
  // not completely clear why we want to leak them in the loops above. This
  // code is replicating legacy behavior, and should not be considered
  // absolutely "correct" behavior. See TODO above about deleting all tasks
  // when it's safe.
  while (!delayed_work_queue_.empty()) {
    delayed_work_queue_.pop();
  }
  return did_work;
}

TimeTicks MessageLoop::CalculateDelayedRuntime(TimeDelta delay) {
  TimeTicks delayed_run_time;
  if (delay > TimeDelta()) {
    delayed_run_time = TimeTicks::Now() + delay;

    if (high_resolution_timer_expiration_.is_null()) {
      // Windows timers are granular to 15.6ms. If we only set high-res
      // timers for those under 15.6ms, then a 18ms timer ticks at ~32ms,
      // which as a percentage is pretty inaccurate. So enable high
      // res timers for any timer which is within 2x of the granularity.
      // This is a tradeoff between accuracy and power management.
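      // As a rough illustration (assuming kMinLowResolutionThresholdMs is the
      // 16 ms suggested by the 15.6 ms granularity above): a 20 ms delay falls
      // under the 2x cutoff of 32 ms and requests the high-resolution timer,
      // while a 50 ms delay does not.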
      bool needs_high_res_timers = delay.InMilliseconds() <
          (2 * base::Time::kMinLowResolutionThresholdMs);
      if (needs_high_res_timers) {
        if (base::Time::ActivateHighResolutionTimer(true)) {
          high_resolution_timer_expiration_ = TimeTicks::Now() +
              TimeDelta::FromMilliseconds(kHighResolutionTimerModeLeaseTimeMs);
        }
      }
    }
  } else {
    DCHECK_EQ(delay.InMilliseconds(), 0) << "delay should not be negative";
  }

  if (!high_resolution_timer_expiration_.is_null()) {
    if (TimeTicks::Now() > high_resolution_timer_expiration_) {
      base::Time::ActivateHighResolutionTimer(false);
      high_resolution_timer_expiration_ = TimeTicks();
    }
  }

  return delayed_run_time;
}

// Possibly called on a background thread!
void MessageLoop::AddToIncomingQueue(PendingTask* pending_task) {
  // Warning: Don't try to short-circuit, and handle this thread's tasks more
  // directly, as it could starve handling of foreign threads. Put every task
  // into this queue.

  scoped_refptr<base::MessagePump> pump;
  {
    base::AutoLock locked(incoming_queue_lock_);

    // Initialize the sequence number. The sequence number is used for delayed
    // tasks (to facilitate FIFO sorting when two tasks have the same
    // delayed_run_time value) and for identifying the task in about:tracing.
    pending_task->sequence_num = next_sequence_num_++;

    TRACE_EVENT_FLOW_BEGIN0("task", "MessageLoop::PostTask",
        TRACE_ID_MANGLE(GetTaskTraceID(*pending_task, this)));

    bool was_empty = incoming_queue_.empty();
    incoming_queue_.push(*pending_task);
    pending_task->task.Reset();
    if (!was_empty)
      return;  // Someone else should have started the sub-pump.

    pump = pump_;
  }
  // Since the incoming_queue_ may contain a task that destroys this message
  // loop, we cannot exit incoming_queue_lock_ until we are done with |this|.
  // We use a stack-based reference to the message pump so that we can call
  // ScheduleWork outside of incoming_queue_lock_.

  pump->ScheduleWork();
}

//------------------------------------------------------------------------------
// Method and data for histogramming events and actions taken by each instance
// on each thread.

void MessageLoop::StartHistogrammer() {
#if !defined(OS_NACL)  // NaCl build has no metrics code.
  if (enable_histogrammer_ && !message_histogram_
      && base::StatisticsRecorder::IsActive()) {
    DCHECK(!thread_name_.empty());
    message_histogram_ = base::LinearHistogram::FactoryGetWithRangeDescription(
        "MsgLoop:" + thread_name_,
        kLeastNonZeroMessageId, kMaxMessageId,
        kNumberOfDistinctMessagesDisplayed,
        message_histogram_->kHexRangePrintingFlag,
        event_descriptions_);
  }
#endif
}

void MessageLoop::HistogramEvent(int event) {
#if !defined(OS_NACL)
  if (message_histogram_)
    message_histogram_->Add(event);
#endif
}

bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = work_queue_.front();
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        AddToDelayedWorkQueue(pending_task);
        // If we changed the topmost task, then it is time to reschedule.
        if (delayed_work_queue_.top().task.Equals(pending_task.task))
          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(pending_task))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}

bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind," there will be a lot of tasks in the delayed work
  // queue that are ready to run. To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again. As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task = delayed_work_queue_.top();
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(pending_task);
}

bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  if (run_loop_->quit_when_idle_received_)
    pump_->Quit();

  return false;
}

void MessageLoop::DeleteSoonInternal(const tracked_objects::Location& from_here,
                                     void(*deleter)(const void*),
                                     const void* object) {
  PostNonNestableTask(from_here, base::Bind(deleter, object));
}

void MessageLoop::ReleaseSoonInternal(
    const tracked_objects::Location& from_here,
    void(*releaser)(const void*),
    const void* object) {
  PostNonNestableTask(from_here, base::Bind(releaser, object));
}
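
// Example (illustrative): callers normally reach these through the templated
// helpers declared in the header, e.g.
//   loop->DeleteSoon(FROM_HERE, doomed_object);
//   loop->ReleaseSoon(FROM_HERE, refcounted_object);
// which forward here with a type-appropriate deleter or releaser.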

//------------------------------------------------------------------------------
// MessageLoopForUI

#if defined(OS_WIN)
void MessageLoopForUI::DidProcessMessage(const MSG& message) {
  pump_win()->DidProcessMessage(message);
}
#endif  // defined(OS_WIN)

#if defined(OS_ANDROID)
void MessageLoopForUI::Start() {
  // No Histogram support for UI message loop as it is managed by Java side
  static_cast<base::MessagePumpForUI*>(pump_.get())->Start(this);
}
#endif

#if defined(OS_IOS)
void MessageLoopForUI::Attach() {
  static_cast<base::MessagePumpUIApplication*>(pump_.get())->Attach(this);
}
#endif

#if !defined(OS_MACOSX) && !defined(OS_NACL) && !defined(OS_ANDROID)
void MessageLoopForUI::AddObserver(Observer* observer) {
  pump_ui()->AddObserver(observer);
}

void MessageLoopForUI::RemoveObserver(Observer* observer) {
  pump_ui()->RemoveObserver(observer);
}
#endif  // !defined(OS_MACOSX) && !defined(OS_NACL) && !defined(OS_ANDROID)

//------------------------------------------------------------------------------
// MessageLoopForIO

#if defined(OS_WIN)

void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  pump_io()->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::RegisterJobObject(HANDLE job, IOHandler* handler) {
  return pump_io()->RegisterJobObject(job, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return pump_io()->WaitForIOCompletion(timeout, filter);
}

#elif defined(OS_IOS)

bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher* controller,
                                           Watcher* delegate) {
  return pump_io()->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}

#elif defined(OS_POSIX) && !defined(OS_NACL)

bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher* controller,
                                           Watcher* delegate) {
  return pump_libevent()->WatchFileDescriptor(
      fd,
      persistent,
      mode,
      controller,
      delegate);
}

#endif