// base/message_loop.cc  [chromium-blink-merge.git]
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_loop.h"

#if defined(OS_POSIX) && !defined(OS_MACOSX)
#include <gdk/gdk.h>
#include <gdk/gdkx.h>
#endif

#include <algorithm>

#include "base/compiler_specific.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/message_pump_default.h"
#include "base/metrics/histogram.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread_local.h"

#if defined(OS_MACOSX)
#include "base/message_pump_mac.h"
#endif
#if defined(OS_POSIX)
#include "base/message_pump_libevent.h"
#endif
#if defined(OS_POSIX) && !defined(OS_MACOSX)
#include "base/message_pump_glib.h"
#endif
#if defined(TOUCH_UI)
#include "base/message_pump_glib_x.h"
#endif

using base::TimeDelta;
using base::TimeTicks;
namespace {

// A lazily created thread local storage for quick access to a thread's message
// loop, if one exists. This should be safe and free of static constructors.
base::LazyInstance<base::ThreadLocalPointer<MessageLoop> > lazy_tls_ptr(
    base::LINKER_INITIALIZED);

// Logical events for Histogram profiling. Run with -message-loop-histogrammer
// to get an accounting of messages and actions taken on each thread.
const int kTaskRunEvent = 0x1;
const int kTimerEvent = 0x2;

// Provide range of message IDs for use in histogramming and debug display.
const int kLeastNonZeroMessageId = 1;
const int kMaxMessageId = 1099;
const int kNumberOfDistinctMessagesDisplayed = 1100;
// Provide a macro that takes an expression (such as a constant, or macro
// constant) and creates a pair to initialize an array of pairs. In this case,
// our pair consists of the expression's value, and the "stringized" version
// of the expression (i.e., the expression put in quotes). For example, if
// we have:
//    #define FOO 2
//    #define BAR 5
// then the following:
//    VALUE_TO_NUMBER_AND_NAME(FOO + BAR)
// will expand to:
//    {7, "FOO + BAR"}
// We use the resulting array as an argument to our histogram, which reads the
// number as a bucket identifier, and proceeds to use the corresponding name
// in the pair (i.e., the quoted string) when printing out a histogram.
#define VALUE_TO_NUMBER_AND_NAME(name) {name, #name},
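// (Here, for instance, VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent) in the array
// below expands to {kTaskRunEvent, "kTaskRunEvent"}, i.e. {0x1, "kTaskRunEvent"}.)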
const base::LinearHistogram::DescriptionPair event_descriptions_[] = {
  // Provide some pretty print capability in our histogram for our internal
  // messages.

  // A few events we handle (kindred to messages), and used to profile actions.
  VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent)
  VALUE_TO_NUMBER_AND_NAME(kTimerEvent)

  {-1, NULL}  // The list must be null terminated, per API to histogram.
};

bool enable_histogrammer_ = false;

}  // namespace
//------------------------------------------------------------------------------

#if defined(OS_WIN)

// Upon a SEH exception in this thread, it restores the original unhandled
// exception filter.
static int SEHFilter(LPTOP_LEVEL_EXCEPTION_FILTER old_filter) {
  ::SetUnhandledExceptionFilter(old_filter);
  return EXCEPTION_CONTINUE_SEARCH;
}

// Retrieves a pointer to the current unhandled exception filter. There
// is no standalone getter method.
static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() {
  LPTOP_LEVEL_EXCEPTION_FILTER top_filter = NULL;
  top_filter = ::SetUnhandledExceptionFilter(0);
  ::SetUnhandledExceptionFilter(top_filter);
  return top_filter;
}

#endif  // defined(OS_WIN)

//------------------------------------------------------------------------------

MessageLoop::TaskObserver::TaskObserver() {
}

MessageLoop::TaskObserver::~TaskObserver() {
}

MessageLoop::DestructionObserver::~DestructionObserver() {
}

//------------------------------------------------------------------------------
MessageLoop::MessageLoop(Type type)
    : type_(type),
      nestable_tasks_allowed_(true),
      exception_restoration_(false),
      message_histogram_(NULL),
      state_(NULL),
#ifdef OS_WIN
      os_modal_loop_(false),
#endif  // OS_WIN
      next_sequence_num_(0) {
  DCHECK(!current()) << "should only have one message loop per thread";
  lazy_tls_ptr.Pointer()->Set(this);

// TODO(rvargas): Get rid of the OS guards.
#if defined(OS_WIN)
#define MESSAGE_PUMP_UI new base::MessagePumpForUI()
#define MESSAGE_PUMP_IO new base::MessagePumpForIO()
#elif defined(OS_MACOSX)
#define MESSAGE_PUMP_UI base::MessagePumpMac::Create()
#define MESSAGE_PUMP_IO new base::MessagePumpLibevent()
#elif defined(TOUCH_UI)
#define MESSAGE_PUMP_UI new base::MessagePumpGlibX()
#define MESSAGE_PUMP_IO new base::MessagePumpLibevent()
#elif defined(OS_NACL)
// Currently NaCl doesn't have a UI or an IO MessageLoop.
// TODO(abarth): Figure out if we need these.
#define MESSAGE_PUMP_UI NULL
#define MESSAGE_PUMP_IO NULL
#elif defined(OS_POSIX)  // POSIX but not MACOSX.
#define MESSAGE_PUMP_UI new base::MessagePumpForUI()
#define MESSAGE_PUMP_IO new base::MessagePumpLibevent()
#else
#error Not implemented
#endif
  if (type_ == TYPE_UI) {
    pump_ = MESSAGE_PUMP_UI;
  } else if (type_ == TYPE_IO) {
    pump_ = MESSAGE_PUMP_IO;
  } else {
    DCHECK_EQ(TYPE_DEFAULT, type_);
    pump_ = new base::MessagePumpDefault();
  }
}
MessageLoop::~MessageLoop() {
  DCHECK_EQ(this, current());

  DCHECK(!state_);

  // Clean up any unprocessed tasks, but take care: deleting a task could
  // result in the addition of more tasks (e.g., via DeleteSoon). We set a
  // limit on the number of times we will allow a deleted task to generate more
  // tasks. Normally, we should only pass through this loop once or twice. If
  // we end up hitting the loop limit, then it is probably due to one task that
  // is being stubborn. Inspect the queues to see who is left.
  bool did_work;
  for (int i = 0; i < 100; ++i) {
    DeletePendingTasks();
    ReloadWorkQueue();
    // If we end up with empty queues, then break out of the loop.
    did_work = DeletePendingTasks();
    if (!did_work)
      break;
  }
  DCHECK(!did_work);

  // Let interested parties have one last shot at accessing this.
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    WillDestroyCurrentMessageLoop());

  // OK, now make it so that no one can find us.
  lazy_tls_ptr.Pointer()->Set(NULL);
}
// static
MessageLoop* MessageLoop::current() {
  // TODO(darin): sadly, we cannot enable this yet since people call us even
  // when they have no intention of using us.
  // DCHECK(loop) << "Ouch, did you forget to initialize me?";
  return lazy_tls_ptr.Pointer()->Get();
}

// static
void MessageLoop::EnableHistogrammer(bool enable) {
  enable_histogrammer_ = enable;
}

void MessageLoop::AddDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.AddObserver(destruction_observer);
}

void MessageLoop::RemoveDestructionObserver(
    DestructionObserver* destruction_observer) {
  DCHECK_EQ(this, current());
  destruction_observers_.RemoveObserver(destruction_observer);
}

void MessageLoop::PostTask(
    const tracked_objects::Location& from_here, Task* task) {
  PostTask_Helper(from_here, task, 0, true);
}

void MessageLoop::PostDelayedTask(
    const tracked_objects::Location& from_here, Task* task, int64 delay_ms) {
  PostTask_Helper(from_here, task, delay_ms, true);
}

void MessageLoop::PostNonNestableTask(
    const tracked_objects::Location& from_here, Task* task) {
  PostTask_Helper(from_here, task, 0, false);
}

void MessageLoop::PostNonNestableDelayedTask(
    const tracked_objects::Location& from_here, Task* task, int64 delay_ms) {
  PostTask_Helper(from_here, task, delay_ms, false);
}
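// Example (usage sketch only; |PingTask| and |target_loop| are hypothetical,
// not part of this file). Any thread may post; the task runs on the target
// loop's thread, and RunTask() deletes it after Run() returns:
//
//   class PingTask : public Task {
//     virtual void Run() { VLOG(1) << "ping"; }
//   };
//   target_loop->PostTask(FROM_HERE, new PingTask());
//   target_loop->PostDelayedTask(FROM_HERE, new PingTask(), 100);  // ~100 ms.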
void MessageLoop::Run() {
  AutoRunState save_state(this);
  RunHandler();
}

void MessageLoop::RunAllPending() {
  AutoRunState save_state(this);
  state_->quit_received = true;  // Means run until we would otherwise block.
  RunHandler();
}

void MessageLoop::Quit() {
  DCHECK_EQ(this, current());
  if (state_) {
    state_->quit_received = true;
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

void MessageLoop::QuitNow() {
  DCHECK_EQ(this, current());
  if (state_) {
    pump_->Quit();
  } else {
    NOTREACHED() << "Must be inside Run to call Quit";
  }
}

void MessageLoop::SetNestableTasksAllowed(bool allowed) {
  if (nestable_tasks_allowed_ != allowed) {
    nestable_tasks_allowed_ = allowed;
    if (!nestable_tasks_allowed_)
      return;
    // Start the native pump if we are not already pumping.
    pump_->ScheduleWork();
  }
}

bool MessageLoop::NestableTasksAllowed() const {
  return nestable_tasks_allowed_;
}

bool MessageLoop::IsNested() {
  return state_->run_depth > 1;
}

void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.AddObserver(task_observer);
}

void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
  DCHECK_EQ(this, current());
  task_observers_.RemoveObserver(task_observer);
}
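// Example (sketch only; see TaskObserver in message_loop.h for the exact
// virtual signatures): an observer added on this loop's own thread is
// notified by RunTask() before and after every task it executes:
//
//   MessageLoop::current()->AddTaskObserver(&my_observer);
//   ...
//   MessageLoop::current()->RemoveTaskObserver(&my_observer);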
void MessageLoop::AssertIdle() const {
  // We only check |incoming_queue_|, since we don't want to lock |work_queue_|.
  base::AutoLock lock(incoming_queue_lock_);
  DCHECK(incoming_queue_.empty());
}

//------------------------------------------------------------------------------

// Runs the loop in two different SEH modes:
// enable_SEH_restoration_ = false : any unhandled exception goes to the last
// one that calls SetUnhandledExceptionFilter().
// enable_SEH_restoration_ = true : any unhandled exception goes to the filter
// that existed before the loop was run.
void MessageLoop::RunHandler() {
#if defined(OS_WIN)
  if (exception_restoration_) {
    RunInternalInSEHFrame();
    return;
  }
#endif

  RunInternal();
}

#if defined(OS_WIN)
__declspec(noinline) void MessageLoop::RunInternalInSEHFrame() {
  LPTOP_LEVEL_EXCEPTION_FILTER current_filter = GetTopSEHFilter();
  __try {
    RunInternal();
  } __except(SEHFilter(current_filter)) {
  }
  return;
}
#endif

void MessageLoop::RunInternal() {
  DCHECK_EQ(this, current());

  StartHistogrammer();

#if !defined(OS_MACOSX)
  if (state_->dispatcher && type() == TYPE_UI) {
    static_cast<base::MessagePumpForUI*>(pump_.get())->
        RunWithDispatcher(this, state_->dispatcher);
    return;
  }
#endif

  pump_->Run(this);
}
bool MessageLoop::ProcessNextDelayedNonNestableTask() {
  if (state_->run_depth != 1)
    return false;

  if (deferred_non_nestable_work_queue_.empty())
    return false;

  Task* task = deferred_non_nestable_work_queue_.front().task;
  deferred_non_nestable_work_queue_.pop();

  RunTask(task);
  return true;
}

void MessageLoop::RunTask(Task* task) {
  DCHECK(nestable_tasks_allowed_);
  // Execute the task and assume the worst: It is probably not reentrant.
  nestable_tasks_allowed_ = false;

  HistogramEvent(kTaskRunEvent);
  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
                    WillProcessTask(task));
  task->Run();
  FOR_EACH_OBSERVER(TaskObserver, task_observers_, DidProcessTask(task));
  delete task;

  nestable_tasks_allowed_ = true;
}
bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
  if (pending_task.nestable || state_->run_depth == 1) {
    RunTask(pending_task.task);
    // Show that we ran a task (Note: a new one might arrive as a
    // consequence!).
    return true;
  }

  // We couldn't run the task now because we're in a nested message loop
  // and the task isn't nestable.
  deferred_non_nestable_work_queue_.push(pending_task);
  return false;
}

void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
  // Move to the delayed work queue. Initialize the sequence number
  // before inserting into the delayed_work_queue_. The sequence number
  // is used to facilitate FIFO sorting when two tasks have the same
  // delayed_run_time value.
  PendingTask new_pending_task(pending_task);
  new_pending_task.sequence_num = next_sequence_num_++;
  delayed_work_queue_.push(new_pending_task);
}
void MessageLoop::ReloadWorkQueue() {
  // We can improve performance of our loading tasks from incoming_queue_ to
  // work_queue_ by waiting until the last minute (work_queue_ is empty) to
  // load. That reduces the number of locks-per-task significantly when our
  // queues get large.
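  // (For example, a burst of N tasks posted from other threads is drained
  // with a single lock acquisition here rather than one lock per task.)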
  if (!work_queue_.empty())
    return;  // Wait till we *really* need to lock and load.

  // Acquire all we can from the inter-thread queue with one lock acquisition.
  {
    base::AutoLock lock(incoming_queue_lock_);
    if (incoming_queue_.empty())
      return;
    incoming_queue_.Swap(&work_queue_);  // Constant time
    DCHECK(incoming_queue_.empty());
  }
}
bool MessageLoop::DeletePendingTasks() {
  bool did_work = !work_queue_.empty();
  while (!work_queue_.empty()) {
    PendingTask pending_task = work_queue_.front();
    work_queue_.pop();
    if (!pending_task.delayed_run_time.is_null()) {
      // We want to delete delayed tasks in the same order in which they would
      // normally be deleted in case of any funny dependencies between delayed
      // tasks.
      AddToDelayedWorkQueue(pending_task);
    } else {
      // TODO(darin): Delete all tasks once it is safe to do so.
      // Until it is totally safe, just do it when running Purify or
      // Valgrind.
#if defined(PURIFY) || defined(USE_HEAPCHECKER)
      delete pending_task.task;
#else
      if (RunningOnValgrind())
        delete pending_task.task;
#endif  // defined(PURIFY) || defined(USE_HEAPCHECKER)
    }
  }
  did_work |= !deferred_non_nestable_work_queue_.empty();
  while (!deferred_non_nestable_work_queue_.empty()) {
    // TODO(darin): Delete all tasks once it is safe to do so.
    // Until it is totally safe, only delete them under Purify and Valgrind.
    Task* task = NULL;
#if defined(PURIFY) || defined(USE_HEAPCHECKER)
    task = deferred_non_nestable_work_queue_.front().task;
#else
    if (RunningOnValgrind())
      task = deferred_non_nestable_work_queue_.front().task;
#endif
    deferred_non_nestable_work_queue_.pop();
    if (task)
      delete task;
  }
  did_work |= !delayed_work_queue_.empty();
  while (!delayed_work_queue_.empty()) {
    Task* task = delayed_work_queue_.top().task;
    delayed_work_queue_.pop();
    delete task;
  }
  return did_work;
}
// Possibly called on a background thread!
void MessageLoop::PostTask_Helper(
    const tracked_objects::Location& from_here, Task* task, int64 delay_ms,
    bool nestable) {
  task->SetBirthPlace(from_here);

  PendingTask pending_task(task, nestable);

  if (delay_ms > 0) {
    pending_task.delayed_run_time =
        TimeTicks::Now() + TimeDelta::FromMilliseconds(delay_ms);

#if defined(OS_WIN)
    if (high_resolution_timer_expiration_.is_null()) {
      // Windows timers are granular to 15.6ms. If we only set high-res
      // timers for those under 15.6ms, then a 18ms timer ticks at ~32ms,
      // which as a percentage is pretty inaccurate. So enable high
      // res timers for any timer which is within 2x of the granularity.
      // This is a tradeoff between accuracy and power management.
      bool needs_high_res_timers =
          delay_ms < (2 * base::Time::kMinLowResolutionThresholdMs);
      if (needs_high_res_timers) {
        base::Time::ActivateHighResolutionTimer(true);
        high_resolution_timer_expiration_ = TimeTicks::Now() +
            TimeDelta::FromMilliseconds(kHighResolutionTimerModeLeaseTimeMs);
      }
    }
#endif
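    // (Illustration, using the approximate numbers above: a 20 ms delay is
    // within 2x of the ~15.6 ms granularity and opts into high-res timers
    // for the lease period; a 100 ms delay keeps the default granularity.)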
  } else {
    DCHECK_EQ(delay_ms, 0) << "delay should not be negative";
  }

#if defined(OS_WIN)
  if (!high_resolution_timer_expiration_.is_null()) {
    if (TimeTicks::Now() > high_resolution_timer_expiration_) {
      base::Time::ActivateHighResolutionTimer(false);
      high_resolution_timer_expiration_ = TimeTicks();
    }
  }
#endif

  // Warning: Don't try to short-circuit, and handle this thread's tasks more
  // directly, as it could starve handling of foreign threads. Put every task
  // into this queue.

  scoped_refptr<base::MessagePump> pump;
  {
    base::AutoLock locked(incoming_queue_lock_);

    bool was_empty = incoming_queue_.empty();
    incoming_queue_.push(pending_task);
    if (!was_empty)
      return;  // Someone else should have started the sub-pump.

    pump = pump_;
  }
  // Since the incoming_queue_ may contain a task that destroys this message
  // loop, we cannot exit incoming_queue_lock_ until we are done with |this|.
  // We use a stack-based reference to the message pump so that we can call
  // ScheduleWork outside of incoming_queue_lock_.

  pump->ScheduleWork();
}
//------------------------------------------------------------------------------
// Method and data for histogramming events and actions taken by each instance
// on each thread.

void MessageLoop::StartHistogrammer() {
  if (enable_histogrammer_ && !message_histogram_
      && base::StatisticsRecorder::IsActive()) {
    DCHECK(!thread_name_.empty());
    message_histogram_ = base::LinearHistogram::FactoryGet(
        "MsgLoop:" + thread_name_,
        kLeastNonZeroMessageId, kMaxMessageId,
        kNumberOfDistinctMessagesDisplayed,
        message_histogram_->kHexRangePrintingFlag);
    message_histogram_->SetRangeDescriptions(event_descriptions_);
  }
}

void MessageLoop::HistogramEvent(int event) {
  if (message_histogram_)
    message_histogram_->Add(event);
}
bool MessageLoop::DoWork() {
  if (!nestable_tasks_allowed_) {
    // Task can't be executed right now.
    return false;
  }

  for (;;) {
    ReloadWorkQueue();
    if (work_queue_.empty())
      break;

    // Execute oldest task.
    do {
      PendingTask pending_task = work_queue_.front();
      work_queue_.pop();
      if (!pending_task.delayed_run_time.is_null()) {
        AddToDelayedWorkQueue(pending_task);
        // If we changed the topmost task, then it is time to re-schedule.
        if (delayed_work_queue_.top().task == pending_task.task)
          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
      } else {
        if (DeferOrRunPendingTask(pending_task))
          return true;
      }
    } while (!work_queue_.empty());
  }

  // Nothing happened.
  return false;
}
bool MessageLoop::DoDelayedWork(base::TimeTicks* next_delayed_work_time) {
  if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
    recent_time_ = *next_delayed_work_time = TimeTicks();
    return false;
  }

  // When we "fall behind," there will be a lot of tasks in the delayed work
  // queue that are ready to run. To increase efficiency when we fall behind,
  // we will only call Time::Now() intermittently, and then process all tasks
  // that are ready to run before calling it again. As a result, the more we
  // fall behind (and have a lot of ready-to-run delayed tasks), the more
  // efficient we'll be at handling the tasks.
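  // (For example, if dozens of delayed tasks are already past due, most of
  // them are compared against the cached recent_time_ below rather than each
  // one triggering its own TimeTicks::Now() call.)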

  TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time;
  if (next_run_time > recent_time_) {
    recent_time_ = TimeTicks::Now();  // Get a better view of Now();
    if (next_run_time > recent_time_) {
      *next_delayed_work_time = next_run_time;
      return false;
    }
  }

  PendingTask pending_task = delayed_work_queue_.top();
  delayed_work_queue_.pop();

  if (!delayed_work_queue_.empty())
    *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;

  return DeferOrRunPendingTask(pending_task);
}
bool MessageLoop::DoIdleWork() {
  if (ProcessNextDelayedNonNestableTask())
    return true;

  if (state_->quit_received)
    pump_->Quit();

  return false;
}

//------------------------------------------------------------------------------
// MessageLoop::AutoRunState

MessageLoop::AutoRunState::AutoRunState(MessageLoop* loop) : loop_(loop) {
  // Make the loop reference us.
  previous_state_ = loop_->state_;
  if (previous_state_) {
    run_depth = previous_state_->run_depth + 1;
  } else {
    run_depth = 1;
  }
  loop_->state_ = this;

  // Initialize the other fields:
  quit_received = false;
#if !defined(OS_MACOSX)
  dispatcher = NULL;
#endif
}

MessageLoop::AutoRunState::~AutoRunState() {
  loop_->state_ = previous_state_;
}

//------------------------------------------------------------------------------
// MessageLoop::PendingTask

bool MessageLoop::PendingTask::operator<(const PendingTask& other) const {
  // Since the top of a priority queue is defined as the "greatest" element, we
  // need to invert the comparison here. We want the smaller time to be at the
  // top of the heap.

  if (delayed_run_time < other.delayed_run_time)
    return false;

  if (delayed_run_time > other.delayed_run_time)
    return true;

  // If the times happen to match, then we use the sequence number to decide.
  // Compare the difference to support integer roll-over.
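  // (Example: two tasks share a delayed_run_time and were given sequence
  // numbers 3 and 5. For the later task, (5 - 3) > 0, so it compares "less"
  // in the max-heap and the earlier task reaches the top first, i.e. FIFO.)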
  return (sequence_num - other.sequence_num) > 0;
}

//------------------------------------------------------------------------------
// MessageLoopForUI

#if defined(OS_WIN)
void MessageLoopForUI::DidProcessMessage(const MSG& message) {
  pump_win()->DidProcessMessage(message);
}
#endif  // defined(OS_WIN)

#if defined(USE_X11)
Display* MessageLoopForUI::GetDisplay() {
  return gdk_x11_get_default_xdisplay();
}
#endif  // defined(USE_X11)
#if !defined(OS_MACOSX) && !defined(OS_NACL)
void MessageLoopForUI::AddObserver(Observer* observer) {
  pump_ui()->AddObserver(observer);
}

void MessageLoopForUI::RemoveObserver(Observer* observer) {
  pump_ui()->RemoveObserver(observer);
}

void MessageLoopForUI::Run(Dispatcher* dispatcher) {
  AutoRunState save_state(this);
  state_->dispatcher = dispatcher;
  RunHandler();
}
#endif  // !defined(OS_MACOSX) && !defined(OS_NACL)

//------------------------------------------------------------------------------
// MessageLoopForIO

#if defined(OS_WIN)

void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
  pump_io()->RegisterIOHandler(file, handler);
}

bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  return pump_io()->WaitForIOCompletion(timeout, filter);
}

#elif defined(OS_POSIX) && !defined(OS_NACL)

bool MessageLoopForIO::WatchFileDescriptor(int fd,
                                           bool persistent,
                                           Mode mode,
                                           FileDescriptorWatcher *controller,
                                           Watcher *delegate) {
  return pump_libevent()->WatchFileDescriptor(
      fd,
      persistent,
      static_cast<base::MessagePumpLibevent::Mode>(mode),
      controller,
      delegate);
}

#endif
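// Usage sketch (illustrative only, not part of this file): on an IO thread, a
// caller that owns a FileDescriptorWatcher |controller| and implements
// MessageLoopForIO::Watcher can ask to be notified whenever a socket becomes
// readable:
//
//   MessageLoopForIO::current()->WatchFileDescriptor(
//       sock_fd, true /* persistent */, MessageLoopForIO::WATCH_READ,
//       &controller, this);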