Cast: Stop logging kVideoFrameSentToEncoder and rename a couple events.
[chromium-blink-merge.git] / content/common/gpu/gpu_channel.cc
blob 75a18e92c9cb8a2c727cac16f1a332596a7d3e7a

// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "content/common/gpu/devtools_gpu_agent.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
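
// Taken together: with a 17 ms vsync interval, a channel is preempted only
// once one of its IPCs has been pending for 2 * 17 = 34 ms, each preemption
// lasts at most 17 ms, and it is cleared early as soon as the oldest pending
// IPC has been waiting for less than 17 ms.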

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO
//   thread.
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  // Takes ownership of gpu_channel (see below).
  GpuChannelMessageFilter(base::WeakPtr<GpuChannel>* gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        channel_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false) {
  }

  virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
    DCHECK(!channel_);
    channel_ = channel;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(channel_);
    channel_ = NULL;
  }

  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(channel_);

    bool handled = false;
    if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
      // This message should not be sent explicitly by the renderer.
      DLOG(ERROR) << "Client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    if (!handled) {
      messages_forwarded_to_channel_++;
      if (preempting_flag_.get())
        pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
      UpdatePreemptionState();
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(FROM_HERE, base::Bind(
          &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
          gpu_channel_,
          sync_point_manager_,
          message.routing_id(),
          sync_point));
      handled = true;
    }
    return handled;
  }
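
  // Called via PostTask from the GPU main thread each time GpuChannel
  // finishes handling a message, so the filter can drop acknowledged entries
  // from pending_messages_ and re-evaluate the preemption state.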
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return channel_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
  }

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };
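
  // Transitions implemented by the TransitionTo*() methods below:
  //   IDLE -> WAITING           (preempting flag set and IPCs are pending)
  //   WAITING -> CHECKING       (after kPreemptWaitTimeMs)
  //   CHECKING -> PREEMPTING or WOULD_PREEMPT_DESCHEDULED
  //                             (oldest pending IPC exceeded kPreemptWaitTimeMs)
  //   PREEMPTING <-> WOULD_PREEMPT_DESCHEDULED (as stubs (de)schedule)
  //   PREEMPTING / WOULD_PREEMPT_DESCHEDULED -> IDLE (caught up or timed out)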

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }
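
  // Note how the preemption budget survives PREEMPTING <->
  // WOULD_PREEMPT_DESCHEDULED flips above: the unused remainder of
  // max_preemption_time_ is recovered from the stopped timer's desired run
  // time, and a negative remainder sends the state machine straight back to
  // IDLE.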

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync
    // point with it, but if that fails for any reason (channel or stub
    // already deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel->get()) {
      GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
          routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
        gpu_channel->get()->OnMessageReceived(message);
        return;
      } else {
        gpu_channel->get()->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  static void DeleteWeakPtrOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel) {
    delete gpu_channel;
  }

  // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
  // IO thread, it's only passed through - therefore the WeakPtr assumptions
  // are respected.
  base::WeakPtr<GpuChannel>* gpu_channel_;
  IPC::Channel* channel_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;
};
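
// The filter above runs on the IO thread, while GpuChannel below runs on the
// GPU main thread; the two communicate via PostTask in both directions
// (MessageProcessed and UpdateStubSchedulingState toward the IO thread,
// InsertSyncPointOnMainThread and DeleteWeakPtrOnMainThread toward the main
// thread).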

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
}

void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_.reset(new IPC::SyncChannel(
      channel_id_,
      IPC::Channel::MODE_SERVER,
      this,
      io_message_loop,
      false,
      shutdown_event));

  base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
      weak_factory_.GetWeakPtr()));
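
  // The filter takes ownership of this heap-allocated WeakPtr and arranges
  // for it to be deleted back on this thread; see
  // GpuChannelMessageFilter::~GpuChannelMessageFilter above.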
  filter_ = new GpuChannelMessageFilter(
      weak_ptr,
      gpu_channel_manager_->sync_point_manager(),
      base::MessageLoopProxy::current());
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    // Move Wait commands to the head of the queue, so the renderer
    // doesn't have to wait any longer than necessary.
    deferred_messages_.push_front(new IPC::Message(message));
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}
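
// Scheduling state (num_stubs_descheduled_) is tracked on the main thread,
// but the preemption state machine runs on the IO thread, so changes are
// forwarded to the filter with PostTask above.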

bool GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               image_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return false;
  }
  stubs_.AddWithID(stub.release(), route_id);
  return true;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::CreateImage(
    gfx::PluginWindowHandle window,
    int32 image_id,
    gfx::Size* size) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateImage",
               "image_id",
               image_id);

  *size = gfx::Size();

  if (image_manager_->LookupImage(image_id)) {
    LOG(ERROR) << "CreateImage failed, image_id already in use.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
  if (!image.get())
    return;

  image_manager_->AddImage(image.get(), image_id);
  *size = image->GetSize();
}

void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

void GpuChannel::DestroySoon() {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}
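
// Note the two flags: preempting_flag_ (created in GetPreemptionFlag above)
// is raised by this channel to stall other channels' work, while
// preempted_flag_ (set here) is owned elsewhere and stalls this channel's
// stubs.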

GpuChannel::~GpuChannel() {
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                        OnDevToolsStartEventsRecording)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                        OnDevToolsStopEventsRecording)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}
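
// Drains deferred_messages_ on the main thread. Each task normally handles a
// single message, but the loop keeps going while the next message is an Echo
// ack for a scheduled stub (see should_fast_track_ack below).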
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

  do {
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    // We want the EchoACK following the SwapBuffers to be sent as close as
    // possible, avoiding scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      image_manager_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      route_id,
      0,  // surface_id: offscreen buffers have no onscreen surface.
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }
  stubs_.AddWithID(stub.release(), route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = (stub && !stub->IsScheduled());
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::OnDevToolsStartEventsRecording(int32 route_id,
                                                bool* succeeded) {
  *succeeded = devtools_gpu_agent_->StartEventsRecording(route_id);
}

void GpuChannel::OnDevToolsStopEventsRecording() {
  devtools_gpu_agent_->StopEventsRecording();
}

void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

uint64 GpuChannel::GetMemoryUsage() {
  uint64 size = 0;
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    size += it.GetCurrentValue()->GetMemoryUsage();
  }
  return size;
}

}  // namespace content