// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/common/gpu/gpu_channel.h"

#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "content/common/gpu/devtools_gpu_agent.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

namespace content {
namespace {
// Number of milliseconds between successive vsyncs. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
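
// For reference, at the 17 ms (roughly 60 Hz) vsync interval above, these
// constants work out to:
//   kPreemptWaitTimeMs      = 2 * 17 = 34 ms  (minimum wait before preempting)
//   kMaxPreemptTimeMs       = 17 ms           (cap on one preemption burst)
//   kStopPreemptThresholdMs = 17 ms           (stop once the queue catches up)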

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting, and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO
//   thread.
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(base::WeakPtr<GpuChannel> gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop,
                          bool future_sync_points)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        sender_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        future_sync_points_(future_sync_points) {}

  virtual void OnFilterAdded(IPC::Sender* sender) OVERRIDE {
    DCHECK(!sender_);
    sender_ = sender;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(sender_);
    sender_ = NULL;
  }

  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(sender_);

    bool handled = false;
    if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
        !future_sync_points_) {
      DLOG(ERROR) << "Untrusted client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      return true;
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      Tuple1<bool> retire;
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                              &retire)) {
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      if (!future_sync_points_ && !retire.a) {
        LOG(ERROR) << "Untrusted contexts can't create future sync points";
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
                     gpu_channel_,
                     sync_point_manager_,
                     message.routing_id(),
                     retire.a,
                     sync_point));
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    messages_forwarded_to_channel_++;
    if (preempting_flag_.get())
      pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
    UpdatePreemptionState();

    return handled;
  }
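
  // Illustrative summary of the InsertSyncPoint fast path handled above: the
  // IO thread replies immediately, while retirement is queued on the main
  // thread.
  //   renderer  -> IO thread:   GpuCommandBufferMsg_InsertSyncPoint
  //   IO thread -> renderer:    reply carrying the freshly generated ID
  //   IO thread -> main thread: InsertSyncPointOnMainThread(), which queues
  //                             GpuCommandBufferMsg_RetireSyncPoint on the
  //                             channel.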

  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return sender_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {}

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }
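
  // State machine summary, derived from the transitions above:
  //
  //   IDLE --pending msg + preempting flag--> WAITING
  //   WAITING --kPreemptWaitTimeMs elapsed--> CHECKING
  //   CHECKING --oldest IPC older than kPreemptWaitTimeMs--> PREEMPTING
  //   PREEMPTING <--> WOULD_PREEMPT_DESCHEDULED  (as stubs (de)schedule)
  //   PREEMPTING / WOULD_PREEMPT_DESCHEDULED --caught up or timed out--> IDLE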

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel> gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      bool retire,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync
    // point with it, but if that fails for any reason (channel or stub
    // already deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel) {
      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        if (retire) {
          GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
          gpu_channel->OnMessageReceived(message);
        }
        return;
      }
      gpu_channel->MessageProcessed();
    }
    manager->RetireSyncPoint(sync_point);
  }

  // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
  // passed through - therefore the WeakPtr assumptions are respected.
  base::WeakPtr<GpuChannel> gpu_channel_;
  IPC::Sender* sender_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software,
                       bool allow_future_sync_points)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0),
      allow_future_sync_points_(allow_future_sync_points) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const base::CommandLine* command_line =
      base::CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
}

GpuChannel::~GpuChannel() {
  STLDeleteElements(&deferred_messages_);
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_ = IPC::SyncChannel::Create(channel_id_,
                                      IPC::Channel::MODE_SERVER,
                                      this,
                                      io_message_loop,
                                      false,
                                      shutdown_event);

  filter_ =
      new GpuChannelMessageFilter(weak_factory_.GetWeakPtr(),
                                  gpu_channel_manager_->sync_point_manager(),
                                  base::MessageLoopProxy::current(),
                                  allow_future_sync_points_);
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    // Move Wait commands to the head of the queue, so the renderer
    // doesn't have to wait any longer than necessary.
    deferred_messages_.push_front(new IPC::Message(message));
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}

CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }
  stubs_.AddWithID(stub.release(), route_id);
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}
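
// Note on the two flags: the flag returned here is set by this channel's
// IO-thread filter while it is preempting; a channel is itself preempted
// through the separate flag installed via SetPreemptByFlag() below.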

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording,
                        OnDevToolsStartEventsRecording)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording,
                        OnDevToolsStopEventsRecording)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

size_t GpuChannel::MatchSwapBufferMessagesPattern(
    IPC::Message* current_message) {
  DCHECK(current_message);
  if (deferred_messages_.empty() || !current_message)
    return 0;
  // Only SetLatencyInfo and AsyncFlush messages are of interest.
  if (current_message->type() != GpuCommandBufferMsg_SetLatencyInfo::ID &&
      current_message->type() != GpuCommandBufferMsg_AsyncFlush::ID)
    return 0;

  size_t index = 0;
  int32 routing_id = current_message->routing_id();

  // If the current message is SetLatencyInfo, we also look ahead one more
  // deferred message.
  IPC::Message* first_message = NULL;
  IPC::Message* second_message = NULL;

  // Fetch the first message and move the index to point at the second message.
  first_message = deferred_messages_[index++];

  // If the current message is AsyncFlush, the expected message sequence for
  // SwapBuffer should be AsyncFlush->Echo. We only need to match the Echo
  // message.
  if (current_message->type() == GpuCommandBufferMsg_AsyncFlush::ID &&
      first_message->type() == GpuCommandBufferMsg_Echo::ID &&
      first_message->routing_id() == routing_id) {
    return 1;
  }

  // If the current message is SetLatencyInfo, the expected message sequence
  // for SwapBuffer should be SetLatencyInfo->AsyncFlush->Echo (optional).
  if (current_message->type() == GpuCommandBufferMsg_SetLatencyInfo::ID &&
      first_message->type() == GpuCommandBufferMsg_AsyncFlush::ID &&
      first_message->routing_id() == routing_id) {
    if (deferred_messages_.size() >= 2)
      second_message = deferred_messages_[index];
    if (!second_message)
      return 1;
    if (second_message->type() == GpuCommandBufferMsg_Echo::ID &&
        second_message->routing_id() == routing_id) {
      return 2;
    }
  }
  // No matched message is found.
  return 0;
}
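
// Matched patterns, by example (all messages on the same routing ID;
// anything else returns 0):
//   current = AsyncFlush,     queue: [Echo, ...]              -> returns 1
//   current = SetLatencyInfo, queue: [AsyncFlush]             -> returns 1
//   current = SetLatencyInfo, queue: [AsyncFlush, Echo, ...]  -> returns 2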

void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  size_t matched_messages_num = 0;
  bool should_handle_swapbuffer_msgs_immediate = false;
  IPC::Message* m = NULL;
  GpuCommandBufferStub* stub = NULL;

  do {
    m = deferred_messages_.front();
    stub = stubs_.Lookup(m->routing_id());
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    if (deferred_messages_.empty())
      break;

    // We handle the pending messages immediately if they match the SwapBuffers
    // IPC pattern: GLRenderer always issues SwapBuffers calls as a specific
    // message sequence, namely SetLatencyInfo->AsyncFlush->Echo.
    //
    // Handling them inline, instead of posting a task to the message loop,
    // avoids the possibility of being blocked by other channels and executes
    // SwapBuffers as soon as possible.
    if (!should_handle_swapbuffer_msgs_immediate) {
      // Start from the currently processed message to match the SwapBuffers
      // pattern.
      matched_messages_num = MatchSwapBufferMessagesPattern(message.get());
      should_handle_swapbuffer_msgs_immediate =
          matched_messages_num > 0 && stub;
    } else {
      DCHECK_GT(matched_messages_num, 0u);
      --matched_messages_num;
      if (!stub || matched_messages_num == 0)
        should_handle_swapbuffer_msgs_immediate = false;
    }
  } while (should_handle_swapbuffer_msgs_immediate);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}
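
// Example of the inline-handling path above: if the queue holds
// [SetLatencyInfo, AsyncFlush, Echo] for one route, all three messages are
// handled within a single HandleMessage() call rather than across three
// separately posted tasks.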

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      route_id,
      0,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }
  stubs_.AddWithID(stub.release(), route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = (stub && !stub->IsScheduled());
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::OnDevToolsStartEventsRecording(int32 route_id,
                                                bool* succeeded) {
  *succeeded = devtools_gpu_agent_->StartEventsRecording(route_id);
}

void GpuChannel::OnDevToolsStopEventsRecording() {
  devtools_gpu_agent_->StopEventsRecording();
}

void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

uint64 GpuChannel::GetMemoryUsage() {
  uint64 size = 0;
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    size += it.GetCurrentValue()->GetMemoryUsage();
  }
  return size;
}

}  // namespace content