// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/message_pump_win.h"

#include <math.h>

#include "base/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/win/wrapped_window_proc.h"

namespace base {

static const wchar_t kWndClass[] = L"Chrome_MessagePumpWindow";

// Message sent to get an additional time slice for pumping (processing)
// another task (a series of such messages creates a continuous task pump).
static const int kMsgHaveWork = WM_USER + 1;
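
// Rough sketch of the resulting "continuous task pump" for the UI pump (this
// call chain mirrors the code later in this file; it is illustrative only):
//
//   ScheduleWork()              // posts a single kMsgHaveWork to the pump's
//                               // message-only window
//     -> WndProcThunk()         // routes kMsgHaveWork
//       -> HandleWorkMessage()  // peeks a replacement message, then calls
//                               // Delegate::DoWork()
//         -> ScheduleWork()     // re-posts kMsgHaveWork if more work remains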
//-----------------------------------------------------------------------------
// MessagePumpWin public:

void MessagePumpWin::AddObserver(MessagePumpObserver* observer) {
  observers_.AddObserver(observer);
}

void MessagePumpWin::RemoveObserver(MessagePumpObserver* observer) {
  observers_.RemoveObserver(observer);
}

void MessagePumpWin::WillProcessMessage(const MSG& msg) {
  FOR_EACH_OBSERVER(MessagePumpObserver, observers_, WillProcessEvent(msg));
}

void MessagePumpWin::DidProcessMessage(const MSG& msg) {
  FOR_EACH_OBSERVER(MessagePumpObserver, observers_, DidProcessEvent(msg));
}

void MessagePumpWin::RunWithDispatcher(
    Delegate* delegate, Dispatcher* dispatcher) {
  RunState s;
  s.delegate = delegate;
  s.dispatcher = dispatcher;
  s.should_quit = false;
  s.run_depth = state_ ? state_->run_depth + 1 : 1;

  RunState* previous_state = state_;
  state_ = &s;

  DoRunLoop();

  state_ = previous_state;
}
void MessagePumpWin::Quit() {
  DCHECK(state_);
  state_->should_quit = true;
}

//-----------------------------------------------------------------------------
// MessagePumpWin protected:

int MessagePumpWin::GetCurrentDelay() const {
  if (delayed_work_time_.is_null())
    return -1;

  // Be careful here. TimeDelta has a precision of microseconds, but we want a
  // value in milliseconds. If there are 5.5ms left, should the delay be 5 or
  // 6? It should be 6 to avoid executing delayed work too early.
  double timeout =
      ceil((delayed_work_time_ - TimeTicks::Now()).InMillisecondsF());

  // If this value is negative, then we need to run delayed work soon.
  int delay = static_cast<int>(timeout);
  if (delay < 0)
    delay = 0;

  return delay;
}
//-----------------------------------------------------------------------------
// MessagePumpForUI public:

MessagePumpForUI::MessagePumpForUI() {
  InitMessageWnd();
}

MessagePumpForUI::~MessagePumpForUI() {
  DestroyWindow(message_hwnd_);
  UnregisterClass(kWndClass, GetModuleHandle(NULL));
}

void MessagePumpForUI::ScheduleWork() {
  if (InterlockedExchange(&have_work_, 1))
    return;  // Someone else continued the pumping.

  // Make sure the MessagePump does some work for us.
  PostMessage(message_hwnd_, kMsgHaveWork, reinterpret_cast<WPARAM>(this), 0);
}
void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
  //
  // We would *like* to provide high resolution timers. Windows timers using
  // SetTimer() have a 10ms granularity. We have to use WM_TIMER as a wakeup
  // mechanism because the application can enter modal windows loops where it
  // is not running our MessageLoop; the only way to have our timers fire in
  // these cases is to post messages there.
  //
  // To provide sub-10ms timers, we process timers directly from our run loop.
  // For the common case, timers will be processed there as the run loop does
  // its normal work. However, we *also* set the system timer so that WM_TIMER
  // events fire. This mops up the case of timers not being able to work in
  // modal message loops. It is possible for the SetTimer to pop and have no
  // pending timers, because they could have already been processed by the
  // run loop itself.
  //
  // We use a single SetTimer corresponding to the timer that will expire
  // soonest. As new timers are created and destroyed, we update SetTimer.
  // Getting a spurious SetTimer event firing is benign, as we'll just be
  // processing an empty timer queue.
  //
  delayed_work_time_ = delayed_work_time;

  int delay_msec = GetCurrentDelay();
  DCHECK_GE(delay_msec, 0);
  if (delay_msec < USER_TIMER_MINIMUM)
    delay_msec = USER_TIMER_MINIMUM;

  // Create a WM_TIMER event that will wake us up to check for any pending
  // timers (in case we are running within a nested, external sub-pump).
  SetTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this), delay_msec, NULL);
}
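
// Worked example (hypothetical numbers): with 2.6 ms of delayed work
// remaining, GetCurrentDelay() returns ceil(2.6) == 3, the clamp above raises
// it to USER_TIMER_MINIMUM (about 10 ms), and SetTimer() arms a ~10 ms
// WM_TIMER. The sub-10ms precision therefore comes from DoRunLoop() calling
// DoDelayedWork() directly; the WM_TIMER is only a backstop for nested or
// modal message loops.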
void MessagePumpForUI::PumpOutPendingPaintMessages() {
  // If we are being called outside of the context of Run, then don't try to do
  // any work.
  if (!state_)
    return;

  // Create a mini-message-pump to force immediate processing of only Windows
  // WM_PAINT messages. Don't provide an infinite loop, but do enough peeking
  // to get the job done. Actual common max is 4 peeks, but we'll be a little
  // safe here.
  const int kMaxPeekCount = 20;
  int peek_count;
  for (peek_count = 0; peek_count < kMaxPeekCount; ++peek_count) {
    MSG msg;
    if (!PeekMessage(&msg, NULL, 0, 0, PM_REMOVE | PM_QS_PAINT))
      break;
    ProcessMessageHelper(msg);
    if (state_->should_quit)  // Handle WM_QUIT.
      break;
  }

  // Histogram what was really being used, to help to adjust kMaxPeekCount.
  DHISTOGRAM_COUNTS("Loop.PumpOutPendingPaintMessages Peeks", peek_count);
}
//-----------------------------------------------------------------------------
// MessagePumpForUI private:

// static
LRESULT CALLBACK MessagePumpForUI::WndProcThunk(
    HWND hwnd, UINT message, WPARAM wparam, LPARAM lparam) {
  switch (message) {
    case kMsgHaveWork:
      reinterpret_cast<MessagePumpForUI*>(wparam)->HandleWorkMessage();
      break;
    case WM_TIMER:
      reinterpret_cast<MessagePumpForUI*>(wparam)->HandleTimerMessage();
      break;
  }
  return DefWindowProc(hwnd, message, wparam, lparam);
}
void MessagePumpForUI::DoRunLoop() {
  // If this were just a simple PeekMessage() loop (servicing all possible work
  // queues), then Windows would try to achieve the following order, according
  // to the MSDN documentation on PeekMessage with no filter:
  //    * Sent messages
  //    * Posted messages
  //    * Sent messages (again)
  //    * WM_PAINT messages
  //    * WM_TIMER messages
  //
  // Summary: none of the above classes is starved, and sent messages have
  // twice the chance of being processed (i.e., reduced service time).

  for (;;) {
    // If we do any work, we may create more messages etc., and more work may
    // possibly be waiting in another task group. When we (for example)
    // ProcessNextWindowsMessage(), there is a good chance there are still more
    // messages waiting. On the other hand, when any of these methods return
    // having done no work, then it is pretty unlikely that calling them again
    // quickly will find any work to do. Finally, if they all say they had no
    // work, then it is a good time to consider sleeping (waiting) for more
    // work.

    bool more_work_is_plausible = ProcessNextWindowsMessage();
    if (state_->should_quit)
      break;

    more_work_is_plausible |= state_->delegate->DoWork();
    if (state_->should_quit)
      break;

    more_work_is_plausible |=
        state_->delegate->DoDelayedWork(&delayed_work_time_);
    // If we did not process any delayed work, then we can assume that our
    // existing WM_TIMER, if any, will fire when delayed work should run. We
    // don't want to disturb that timer if it is already in flight. However,
    // if we did do all remaining delayed work, then let's kill the WM_TIMER.
    if (more_work_is_plausible && delayed_work_time_.is_null())
      KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    more_work_is_plausible = state_->delegate->DoIdleWork();
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    WaitForWork();  // Wait (sleep) until we have work to do again.
  }
}
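
// For reference, the Delegate methods driven by the loop above follow roughly
// this contract (a sketch of how this file uses them; base/message_pump.h is
// the authoritative definition):
//
//   bool DoWork();               // ran a pending task; true if more immediate
//                                // work may be queued
//   bool DoDelayedWork(TimeTicks* next_delayed_work_time);
//                                // ran a delayed task; writes the next wakeup
//                                // time (a null TimeTicks if there is none)
//   bool DoIdleWork();           // ran idle work; true if more is available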
void MessagePumpForUI::InitMessageWnd() {
  HINSTANCE hinst = GetModuleHandle(NULL);

  WNDCLASSEX wc = {0};
  wc.cbSize = sizeof(wc);
  wc.lpfnWndProc = base::win::WrappedWindowProc<WndProcThunk>;
  wc.hInstance = hinst;
  wc.lpszClassName = kWndClass;
  RegisterClassEx(&wc);

  message_hwnd_ =
      CreateWindow(kWndClass, 0, 0, 0, 0, 0, 0, HWND_MESSAGE, 0, hinst, 0);
  DCHECK(message_hwnd_);
}
void MessagePumpForUI::WaitForWork() {
  // Wait until a message is available, up to the time needed by the timer
  // manager to fire the next set of timers.
  int delay = GetCurrentDelay();
  if (delay < 0)  // Negative value means no timers waiting.
    delay = INFINITE;

  DWORD result;
  result = MsgWaitForMultipleObjectsEx(0, NULL, delay, QS_ALLINPUT,
                                       MWMO_INPUTAVAILABLE);

  if (WAIT_OBJECT_0 == result) {
    // A WM_* message is available.
    // If a parent-child relationship exists between windows across threads,
    // then their thread inputs are implicitly attached. This causes
    // MsgWaitForMultipleObjectsEx to return, indicating that messages are
    // ready for processing (specifically, mouse messages intended for the
    // child window, e.g. when the child window has capture). The subsequent
    // PeekMessage call can then fail to return any messages, causing us to
    // enter a tight loop at times. The WaitMessage call below is a workaround
    // that gives the child window some time to process its input messages.
    MSG msg = {0};
    DWORD queue_status = GetQueueStatus(QS_MOUSE);
    if (HIWORD(queue_status) & QS_MOUSE &&
        !PeekMessage(&msg, NULL, WM_MOUSEFIRST, WM_MOUSELAST, PM_NOREMOVE)) {
      WaitMessage();
    }
    return;
  }

  DCHECK_NE(WAIT_FAILED, result) << GetLastError();
}
void MessagePumpForUI::HandleWorkMessage() {
  // If we are being called outside of the context of Run, then don't try to do
  // any work. This could correspond to a MessageBox call or something of that
  // sort.
  if (!state_) {
    // Since we handled a kMsgHaveWork message, we must still update this flag.
    InterlockedExchange(&have_work_, 0);
    return;
  }

  // Let whatever would have run had we not been putting messages in the queue
  // run now. This is an attempt to make our dummy message not starve other
  // messages that may be in the Windows message queue.
  ProcessPumpReplacementMessage();

  // Now give the delegate a chance to do some work. It'll let us know if it
  // needs to do more work.
  if (state_->delegate->DoWork())
    ScheduleWork();
}
void MessagePumpForUI::HandleTimerMessage() {
  KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));

  // If we are being called outside of the context of Run, then don't do
  // anything. This could correspond to a MessageBox call or something of
  // that sort.
  if (!state_)
    return;

  state_->delegate->DoDelayedWork(&delayed_work_time_);
  if (!delayed_work_time_.is_null()) {
    // A bit gratuitous to set delayed_work_time_ again, but oh well.
    ScheduleDelayedWork(delayed_work_time_);
  }
}
bool MessagePumpForUI::ProcessNextWindowsMessage() {
  // If there are sent messages in the queue then PeekMessage internally
  // dispatches the message and returns false. We return true in this
  // case to ensure that the message loop peeks again instead of calling
  // MsgWaitForMultipleObjectsEx again.
  bool sent_messages_in_queue = false;
  DWORD queue_status = GetQueueStatus(QS_SENDMESSAGE);
  if (HIWORD(queue_status) & QS_SENDMESSAGE)
    sent_messages_in_queue = true;

  MSG msg;
  if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
    return ProcessMessageHelper(msg);

  return sent_messages_in_queue;
}
bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
  if (WM_QUIT == msg.message) {
    // Repost the QUIT message so that it will be retrieved by the primary
    // GetMessage() loop.
    state_->should_quit = true;
    PostQuitMessage(static_cast<int>(msg.wParam));
    return false;
  }

  // While running our main message pump, we discard kMsgHaveWork messages.
  if (msg.message == kMsgHaveWork && msg.hwnd == message_hwnd_)
    return ProcessPumpReplacementMessage();

  if (CallMsgFilter(const_cast<MSG*>(&msg), kMessageFilterCode))
    return true;

  WillProcessMessage(msg);

  if (state_->dispatcher) {
    if (!state_->dispatcher->Dispatch(msg))
      state_->should_quit = true;
  } else {
    TranslateMessage(&msg);
    DispatchMessage(&msg);
  }

  DidProcessMessage(msg);
  return true;
}
bool MessagePumpForUI::ProcessPumpReplacementMessage() {
  // When we encounter a kMsgHaveWork message, this method is called to peek
  // and process a replacement message, such as a WM_PAINT or WM_TIMER. The
  // goal is to make the kMsgHaveWork as non-intrusive as possible, even though
  // a continuous stream of such messages is posted. This method carefully
  // peeks a message while there is no chance for a kMsgHaveWork to be pending,
  // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
  // possibly be posted), and finally dispatches that peeked replacement. Note
  // that the re-post of kMsgHaveWork may be asynchronous to this thread!!

  bool have_message = false;
  MSG msg;
  // We should not process all window messages if we are in the context of an
  // OS modal loop, i.e. in the context of a windows API call like MessageBox.
  // This is to ensure that these messages are peeked out by the OS modal loop.
  if (MessageLoop::current()->os_modal_loop()) {
    // We only peek out WM_PAINT and WM_TIMER here for reasons mentioned above.
    have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) ||
                   PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE);
  } else {
    have_message = (0 != PeekMessage(&msg, NULL, 0, 0, PM_REMOVE));
  }

  DCHECK(!have_message || kMsgHaveWork != msg.message ||
         msg.hwnd != message_hwnd_);

  // Since we discarded a kMsgHaveWork message, we must update the flag.
  int old_have_work = InterlockedExchange(&have_work_, 0);
  DCHECK(old_have_work);

  // We don't need a special time slice if there was no message to process.
  if (!have_message)
    return false;

  // Guarantee we'll get another time slice in the case where we go into native
  // windows code. This ScheduleWork() may hurt performance a tiny bit when
  // tasks appear very infrequently, but when the event queue is busy, the
  // kMsgHaveWork events get (percentage wise) rarer and rarer.
  ScheduleWork();
  return ProcessMessageHelper(msg);
}
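
// Net effect of the have_work_ handshake, roughly (a sketch of the two call
// sites above and in ScheduleWork()):
//
//   ScheduleWork():
//     InterlockedExchange(&have_work_, 1) == 0  ->  post one kMsgHaveWork
//   ProcessPumpReplacementMessage():
//     InterlockedExchange(&have_work_, 0)       ->  a fresh kMsgHaveWork may
//                                                   now be posted again
//
// At most one kMsgHaveWork is therefore pending at any time, which keeps the
// pump from flooding the Windows message queue.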
//-----------------------------------------------------------------------------
// MessagePumpForIO public:

MessagePumpForIO::MessagePumpForIO() {
  port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, NULL, 1));
  DCHECK(port_.IsValid());
}

void MessagePumpForIO::ScheduleWork() {
  if (InterlockedExchange(&have_work_, 1))
    return;  // Someone else continued the pumping.

  // Make sure the MessagePump does some work for us.
  BOOL ret = PostQueuedCompletionStatus(port_, 0,
                                        reinterpret_cast<ULONG_PTR>(this),
                                        reinterpret_cast<OVERLAPPED*>(this));
  DCHECK(ret);
}
void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
  // We know that we can't be blocked right now since this method can only be
  // called on the same thread as Run, so we only need to update our record of
  // how long to sleep when we do sleep.
  delayed_work_time_ = delayed_work_time;
}

void MessagePumpForIO::RegisterIOHandler(HANDLE file_handle,
                                         IOHandler* handler) {
  ULONG_PTR key = reinterpret_cast<ULONG_PTR>(handler);
  HANDLE port = CreateIoCompletionPort(file_handle, port_, key, 1);
  DPCHECK(port);
}
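
// Typical usage, sketched (hypothetical caller code; see the IOHandler and
// IOContext declarations in base/message_pump_win.h for the real interface):
//
//   class Reader : public MessagePumpForIO::IOHandler {
//     virtual void OnIOCompleted(IOContext* context, DWORD bytes_transfered,
//                                DWORD error);
//   };
//   ...
//   pump->RegisterIOHandler(file_handle, &reader);
//   // Start an overlapped read using an IOContext owned by the caller; the
//   // completion is delivered to OnIOCompleted() when WaitForIOCompletion()
//   // dequeues it from the completion port.
//   ReadFile(file_handle, buffer, buffer_len, NULL, &context->overlapped);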
//-----------------------------------------------------------------------------
// MessagePumpForIO private:

void MessagePumpForIO::DoRunLoop() {
  for (;;) {
    // If we do any work, we may create more messages etc., and more work may
    // possibly be waiting in another task group. When we (for example)
    // WaitForIOCompletion(), there is a good chance there are still more
    // messages waiting. On the other hand, when any of these methods return
    // having done no work, then it is pretty unlikely that calling them
    // again quickly will find any work to do. Finally, if they all say they
    // had no work, then it is a good time to consider sleeping (waiting) for
    // more work.

    bool more_work_is_plausible = state_->delegate->DoWork();
    if (state_->should_quit)
      break;

    more_work_is_plausible |= WaitForIOCompletion(0, NULL);
    if (state_->should_quit)
      break;

    more_work_is_plausible |=
        state_->delegate->DoDelayedWork(&delayed_work_time_);
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    more_work_is_plausible = state_->delegate->DoIdleWork();
    if (state_->should_quit)
      break;

    if (more_work_is_plausible)
      continue;

    WaitForWork();  // Wait (sleep) until we have work to do again.
  }
}
// Wait until IO completes, up to the time needed by the timer manager to fire
// the next set of timers.
void MessagePumpForIO::WaitForWork() {
  // We do not support nested IO message loops. This is to avoid messy
  // recursion problems.
  DCHECK_EQ(1, state_->run_depth) << "Cannot nest an IO message loop!";

  int timeout = GetCurrentDelay();
  if (timeout < 0)  // Negative value means no timers waiting.
    timeout = INFINITE;

  WaitForIOCompletion(timeout, NULL);
}
bool MessagePumpForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
  IOItem item;
  if (completed_io_.empty() || !MatchCompletedIOItem(filter, &item)) {
    // We have to ask the system for another IO completion.
    if (!GetIOItem(timeout, &item))
      return false;

    if (ProcessInternalIOItem(item))
      return true;
  }

  if (item.context->handler) {
    if (filter && item.handler != filter) {
      // Save this item for later.
      completed_io_.push_back(item);
    } else {
      DCHECK_EQ(item.context->handler, item.handler);
      WillProcessIOEvent();
      item.handler->OnIOCompleted(item.context, item.bytes_transfered,
                                  item.error);
      DidProcessIOEvent();
    }
  } else {
    // The handler must be gone by now; just clean up the mess.
    delete item.context;
  }
  return true;
}
// Asks the OS for another IO completion result.
bool MessagePumpForIO::GetIOItem(DWORD timeout, IOItem* item) {
  memset(item, 0, sizeof(*item));
  ULONG_PTR key = NULL;
  OVERLAPPED* overlapped = NULL;
  if (!GetQueuedCompletionStatus(port_.Get(), &item->bytes_transfered, &key,
                                 &overlapped, timeout)) {
    if (!overlapped)
      return false;  // Nothing in the queue.
    item->error = GetLastError();
    item->bytes_transfered = 0;
  }

  item->handler = reinterpret_cast<IOHandler*>(key);
  item->context = reinterpret_cast<IOContext*>(overlapped);
  return true;
}
bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
  if (this == reinterpret_cast<MessagePumpForIO*>(item.context) &&
      this == reinterpret_cast<MessagePumpForIO*>(item.handler)) {
    // This is our internal completion.
    DCHECK(!item.bytes_transfered);
    InterlockedExchange(&have_work_, 0);
    return true;
  }
  return false;
}

// Returns a completion item that was previously received.
bool MessagePumpForIO::MatchCompletedIOItem(IOHandler* filter, IOItem* item) {
  DCHECK(!completed_io_.empty());
  for (std::list<IOItem>::iterator it = completed_io_.begin();
       it != completed_io_.end(); ++it) {
    if (!filter || it->handler == filter) {
      *item = *it;
      completed_io_.erase(it);
      return true;
    }
  }
  return false;
}
void MessagePumpForIO::AddIOObserver(IOObserver* obs) {
  io_observers_.AddObserver(obs);
}

void MessagePumpForIO::RemoveIOObserver(IOObserver* obs) {
  io_observers_.RemoveObserver(obs);
}

void MessagePumpForIO::WillProcessIOEvent() {
  FOR_EACH_OBSERVER(IOObserver, io_observers_, WillProcessIOEvent());
}

void MessagePumpForIO::DidProcessIOEvent() {
  FOR_EACH_OBSERVER(IOObserver, io_observers_, DidProcessIOEvent());
}

}  // namespace base