// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "content/common/gpu/devtools_gpu_agent.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif
namespace content {
namespace {

// Number of milliseconds between successive vsyncs. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

}  // anonymous namespace
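
// A worked example of how the constants above combine (not a comment from
// the original source; the values follow directly from the definitions):
//
//   kPreemptWaitTimeMs      = 2 * 17 = 34 ms  // wait before preempting
//   kMaxPreemptTimeMs       =     17 ms       // longest single preemption
//   kStopPreemptThresholdMs =     17 ms       // early-exit threshold
//
// So an IPC must be pending for at least two vsync intervals (34 ms) before
// other channels are preempted, and any one preemption is cleared after at
// most one vsync interval (17 ms), or sooner if the queue catches up.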
// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO
//   thread.
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(base::WeakPtr<GpuChannel> gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop,
                          bool future_sync_points)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        sender_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        future_sync_points_(future_sync_points) {}
  virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE {
    DCHECK(!sender_);
    sender_ = sender;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(sender_);
    sender_ = NULL;
  }
  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(sender_);

    bool handled = false;
    if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
        !future_sync_points_) {
      DLOG(ERROR) << "Untrusted client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      return true;
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      Tuple1<bool> retire;
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                              &retire)) {
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      if (!future_sync_points_ && !retire.a) {
        LOG(ERROR) << "Untrusted contexts can't create future sync points";
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
                     gpu_channel_,
                     sync_point_manager_,
                     message.routing_id(),
                     retire.a,
                     sync_point));
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    messages_forwarded_to_channel_++;
    if (preempting_flag_.get())
      pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
    UpdatePreemptionState();

    return handled;
  }
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return sender_->Send(message);
  }
 protected:
  virtual ~GpuChannelMessageFilter() {}
 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };
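
  // A sketch of the transitions implemented by the Transition*() methods
  // below (reconstructed from their bodies; not a comment from the original
  // source):
  //
  //   IDLE --pending message & preempting flag--> WAITING
  //   WAITING --kPreemptWaitTimeMs timer--> CHECKING
  //   CHECKING --oldest IPC pending >= kPreemptWaitTimeMs--> PREEMPTING
  //   PREEMPTING <--stub descheduled / rescheduled--> WOULD_PREEMPT_DESCHEDULED
  //   PREEMPTING / WOULD_PREEMPT_DESCHEDULED --caught up or budget spent--> IDLE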

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };
  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }
  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }
  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }
  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }
  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }
  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }
  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }
  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel> gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      bool retire,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync
    // point with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel) {
      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        if (retire) {
          GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
          gpu_channel->OnMessageReceived(message);
        }
        return;
      } else {
        gpu_channel->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }
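
  // A sketch of the overall InsertSyncPoint flow, reconstructed from
  // OnMessageReceived() and the function above (not a comment from the
  // original source):
  //
  //   1. IO thread: reply to the client immediately with a freshly generated
  //      sync point ID, so the client never blocks on the GPU main thread.
  //   2. IO thread: post InsertSyncPointOnMainThread() to the main thread.
  //   3. Main thread: associate the sync point with the stub and, if |retire|
  //      is true, enqueue a RetireSyncPoint message so the sync point retires
  //      in order with the rest of the channel's traffic.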

  // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
  // passed through - therefore the WeakPtr assumptions are respected.
  base::WeakPtr<GpuChannel> gpu_channel_;
  IPC::Sender* sender_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software,
                       bool allow_future_sync_points)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0),
      allow_future_sync_points_(allow_future_sync_points) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
}
GpuChannel::~GpuChannel() {
  STLDeleteElements(&deferred_messages_);
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}
void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_ = IPC::SyncChannel::Create(channel_id_,
                                      IPC::Channel::MODE_SERVER,
                                      this,
                                      io_message_loop,
                                      false,
                                      shutdown_event);

  filter_ =
      new GpuChannelMessageFilter(weak_factory_.GetWeakPtr(),
                                  gpu_channel_manager_->sync_point_manager(),
                                  base::MessageLoopProxy::current(),
                                  allow_future_sync_points_);
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
}
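
// A minimal sketch of the expected construction sequence (hypothetical
// caller; in practice GpuChannelManager drives this):
//
//   scoped_ptr<GpuChannel> channel(new GpuChannel(
//       manager, watchdog, share_group, mailbox_manager,
//       client_id, software, allow_future_sync_points));
//   channel->Init(io_message_loop, shutdown_event);
//
// Init() runs after the constructor so that weak_factory_ and channel_id_
// exist before the IO-thread filter is created and attached.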

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    // Move Wait commands to the head of the queue, so the renderer
    // doesn't have to wait any longer than necessary.
    deferred_messages_.push_front(new IPC::Message(message));
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}
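
// Example (hypothetical caller, not from the original source): because of
// the DCHECK above, GPU-side callers only ever post asynchronous,
// fire-and-forget messages, e.g.:
//
//   channel->Send(new SomeAsyncGpuMsg(route_id, payload));  // hypothetical
//
// A synchronous send could deadlock, since the renderer may itself be
// blocked on a synchronous request to this channel.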

void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}

CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               image_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }
  stubs_.AddWithID(stub.release(), route_id);
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::CreateImage(
    gfx::PluginWindowHandle window,
    int32 image_id,
    gfx::Size* size) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateImage",
               "image_id",
               image_id);

  *size = gfx::Size();

  if (image_manager_->LookupImage(image_id)) {
    LOG(ERROR) << "CreateImage failed, image_id already in use.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
  if (!image.get())
    return;

  image_manager_->AddImage(image.get(), image_id);
  *size = image->GetSize();
}

void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}
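
// A sketch of how the two flags are intended to be wired together
// (hypothetical; the actual wiring lives outside this file):
//
//   // Let |high_priority| preempt |low_priority|:
//   low_priority->SetPreemptByFlag(high_priority->GetPreemptionFlag());
//
// GetPreemptionFlag() lazily creates the flag that this channel's IO-thread
// filter sets while preempting; SetPreemptByFlag() hands such a flag to every
// stub of the channel that should yield.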

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                        OnDevToolsStartEventsRecording)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                        OnDevToolsStopEventsRecording)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

  do {
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    // We want the EchoACK following the SwapBuffers to be sent as close as
    // possible, avoiding scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      image_manager_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      route_id,
      0,  // surface_id: offscreen command buffers have no surface.
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }
  stubs_.AddWithID(stub.release(), route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = (stub && !stub->IsScheduled());
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::OnDevToolsStartEventsRecording(int32 route_id,
                                                bool* succeeded) {
  *succeeded = devtools_gpu_agent_->StartEventsRecording(route_id);
}

void GpuChannel::OnDevToolsStopEventsRecording() {
  devtools_gpu_agent_->StopEventsRecording();
}

void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

uint64 GpuChannel::GetMemoryUsage() {
  uint64 size = 0;
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    size += it.GetCurrentValue()->GetMemoryUsage();
  }
  return size;
}

}  // namespace content