1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "content/common/gpu/gpu_channel.h"
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/debug/trace_event.h"
17 #include "base/message_loop/message_loop_proxy.h"
18 #include "base/strings/string_util.h"
19 #include "base/timer/timer.h"
20 #include "content/common/gpu/devtools_gpu_agent.h"
21 #include "content/common/gpu/gpu_channel_manager.h"
22 #include "content/common/gpu/gpu_messages.h"
23 #include "content/common/gpu/sync_point_manager.h"
24 #include "content/public/common/content_switches.h"
25 #include "gpu/command_buffer/common/mailbox.h"
26 #include "gpu/command_buffer/service/gpu_scheduler.h"
27 #include "gpu/command_buffer/service/image_manager.h"
28 #include "gpu/command_buffer/service/mailbox_manager.h"
29 #include "ipc/ipc_channel.h"
30 #include "ipc/message_filter.h"
31 #include "ui/gl/gl_context.h"
32 #include "ui/gl/gl_image.h"
33 #include "ui/gl/gl_surface.h"
36 #include "ipc/ipc_channel_posix.h"
42 // Number of milliseconds between successive vsync. Many GL commands block
43 // on vsync, so thresholds for preemption should be multiples of this.
// NOTE(review): 17 ms presumably assumes a ~60 Hz display (1000/60) - confirm.
44 const int64 kVsyncIntervalMs
= 17;
46 // Amount of time that we will wait for an IPC to be processed before
47 // preempting. After a preemption, we must wait this long before triggering
48 // another preemption.
49 const int64 kPreemptWaitTimeMs
= 2 * kVsyncIntervalMs
;
51 // Once we trigger a preemption, the maximum duration that we will wait
52 // before clearing the preemption.
53 const int64 kMaxPreemptTimeMs
= kVsyncIntervalMs
;
55 // Stop the preemption once the time for the longest pending IPC drops
56 // below this threshold.
57 const int64 kStopPreemptThresholdMs
= kVsyncIntervalMs
;
59 } // anonymous namespace
61 // This filter does three things:
62 // - it counts and timestamps each message forwarded to the channel
63 // so that we can preempt other channels if a message takes too long to
64 // process. To guarantee fairness, we must wait a minimum amount of time
65 // before preempting and we limit the amount of time that we can preempt in
66 // one shot (see constants above).
67 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
68 // thread, generating the sync point ID and responding immediately, and then
69 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
70 // into the channel's queue.
71 // - it generates mailbox names for clients of the GPU process on the IO thread.
72 class GpuChannelMessageFilter
: public IPC::MessageFilter
{
74 // Takes ownership of gpu_channel (see below).
75 GpuChannelMessageFilter(base::WeakPtr
<GpuChannel
>* gpu_channel
,
76 scoped_refptr
<SyncPointManager
> sync_point_manager
,
77 scoped_refptr
<base::MessageLoopProxy
> message_loop
)
78 : preemption_state_(IDLE
),
79 gpu_channel_(gpu_channel
),
81 sync_point_manager_(sync_point_manager
),
82 message_loop_(message_loop
),
83 messages_forwarded_to_channel_(0),
84 a_stub_is_descheduled_(false) {
87 virtual void OnFilterAdded(IPC::Channel
* channel
) OVERRIDE
{
92 virtual void OnFilterRemoved() OVERRIDE
{
97 virtual bool OnMessageReceived(const IPC::Message
& message
) OVERRIDE
{
100 bool handled
= false;
101 if (message
.type() == GpuCommandBufferMsg_RetireSyncPoint::ID
) {
102 // This message should not be sent explicitly by the renderer.
103 DLOG(ERROR
) << "Client should not send "
104 "GpuCommandBufferMsg_RetireSyncPoint message";
108 // All other messages get processed by the GpuChannel.
110 messages_forwarded_to_channel_
++;
111 if (preempting_flag_
.get())
112 pending_messages_
.push(PendingMessage(messages_forwarded_to_channel_
));
113 UpdatePreemptionState();
116 if (message
.type() == GpuCommandBufferMsg_InsertSyncPoint::ID
) {
117 uint32 sync_point
= sync_point_manager_
->GenerateSyncPoint();
118 IPC::Message
* reply
= IPC::SyncMessage::GenerateReply(&message
);
119 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply
, sync_point
);
121 message_loop_
->PostTask(FROM_HERE
, base::Bind(
122 &GpuChannelMessageFilter::InsertSyncPointOnMainThread
,
125 message
.routing_id(),
132 void MessageProcessed(uint64 messages_processed
) {
133 while (!pending_messages_
.empty() &&
134 pending_messages_
.front().message_number
<= messages_processed
)
135 pending_messages_
.pop();
136 UpdatePreemptionState();
// Stores the preemption flag and the current "a stub is descheduled" state;
// both values are read by UpdatePreemptionState() to decide when preemption
// may start or must stop.
// NOTE(review): the closing brace of this method (original line 144) is
// missing from this extraction.
139 void SetPreemptingFlagAndSchedulingState(
140 gpu::PreemptionFlag
* preempting_flag
,
141 bool a_stub_is_descheduled
) {
142 preempting_flag_
= preempting_flag
;
143 a_stub_is_descheduled_
= a_stub_is_descheduled
;
// Updates the cached stub scheduling state and immediately re-evaluates the
// preemption state machine.
146 void UpdateStubSchedulingState(bool a_stub_is_descheduled
) {
147 a_stub_is_descheduled_
= a_stub_is_descheduled
;
148 UpdatePreemptionState();
// Forwards |message| over the underlying IPC channel. Presumably takes
// ownership of |message| per the IPC::Sender contract - confirm. No NULL
// check on channel_ is visible here; presumably only called while the
// filter is attached (between OnFilterAdded and OnFilterRemoved) - confirm.
151 bool Send(IPC::Message
* message
) {
152 return channel_
->Send(message
);
156 virtual ~GpuChannelMessageFilter() {
157 message_loop_
->PostTask(FROM_HERE
, base::Bind(
158 &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread
, gpu_channel_
));
162 enum PreemptionState
{
163 // Either there's no other channel to preempt, there are no messages
164 // pending processing, or we just finished preempting and have to wait
165 // before preempting again.
167 // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
169 // We can preempt whenever any IPC processing takes more than
170 // kPreemptWaitTimeMs.
172 // We are currently preempting (i.e. no stub is descheduled).
174 // We would like to preempt, but some stub is descheduled.
175 WOULD_PREEMPT_DESCHEDULED
,
178 PreemptionState preemption_state_
;
180 // Maximum amount of time that we can spend in PREEMPTING.
181 // It is reset when we transition to IDLE.
182 base::TimeDelta max_preemption_time_
;
184 struct PendingMessage
{
185 uint64 message_number
;
186 base::TimeTicks time_received
;
188 explicit PendingMessage(uint64 message_number
)
189 : message_number(message_number
),
190 time_received(base::TimeTicks::Now()) {
194 void UpdatePreemptionState() {
195 switch (preemption_state_
) {
197 if (preempting_flag_
.get() && !pending_messages_
.empty())
198 TransitionToWaiting();
201 // A timer will transition us to CHECKING.
202 DCHECK(timer_
.IsRunning());
205 if (!pending_messages_
.empty()) {
206 base::TimeDelta time_elapsed
=
207 base::TimeTicks::Now() - pending_messages_
.front().time_received
;
208 if (time_elapsed
.InMilliseconds() < kPreemptWaitTimeMs
) {
209 // Schedule another check for when the IPC may go long.
212 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs
) -
214 this, &GpuChannelMessageFilter::UpdatePreemptionState
);
216 if (a_stub_is_descheduled_
)
217 TransitionToWouldPreemptDescheduled();
219 TransitionToPreempting();
224 // A TransitionToIdle() timer should always be running in this state.
225 DCHECK(timer_
.IsRunning());
226 if (a_stub_is_descheduled_
)
227 TransitionToWouldPreemptDescheduled();
229 TransitionToIdleIfCaughtUp();
231 case WOULD_PREEMPT_DESCHEDULED
:
232 // A TransitionToIdle() timer should never be running in this state.
233 DCHECK(!timer_
.IsRunning());
234 if (!a_stub_is_descheduled_
)
235 TransitionToPreempting();
237 TransitionToIdleIfCaughtUp();
244 void TransitionToIdleIfCaughtUp() {
245 DCHECK(preemption_state_
== PREEMPTING
||
246 preemption_state_
== WOULD_PREEMPT_DESCHEDULED
);
247 if (pending_messages_
.empty()) {
250 base::TimeDelta time_elapsed
=
251 base::TimeTicks::Now() - pending_messages_
.front().time_received
;
252 if (time_elapsed
.InMilliseconds() < kStopPreemptThresholdMs
)
257 void TransitionToIdle() {
258 DCHECK(preemption_state_
== PREEMPTING
||
259 preemption_state_
== WOULD_PREEMPT_DESCHEDULED
);
260 // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
263 preemption_state_
= IDLE
;
264 preempting_flag_
->Reset();
265 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
267 UpdatePreemptionState();
270 void TransitionToWaiting() {
271 DCHECK_EQ(preemption_state_
, IDLE
);
272 DCHECK(!timer_
.IsRunning());
274 preemption_state_
= WAITING
;
277 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs
),
278 this, &GpuChannelMessageFilter::TransitionToChecking
);
281 void TransitionToChecking() {
282 DCHECK_EQ(preemption_state_
, WAITING
);
283 DCHECK(!timer_
.IsRunning());
285 preemption_state_
= CHECKING
;
286 max_preemption_time_
= base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs
);
287 UpdatePreemptionState();
290 void TransitionToPreempting() {
291 DCHECK(preemption_state_
== CHECKING
||
292 preemption_state_
== WOULD_PREEMPT_DESCHEDULED
);
293 DCHECK(!a_stub_is_descheduled_
);
295 // Stop any pending state update checks that we may have queued
297 if (preemption_state_
== CHECKING
)
300 preemption_state_
= PREEMPTING
;
301 preempting_flag_
->Set();
302 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);
306 max_preemption_time_
,
307 this, &GpuChannelMessageFilter::TransitionToIdle
);
309 UpdatePreemptionState();
312 void TransitionToWouldPreemptDescheduled() {
313 DCHECK(preemption_state_
== CHECKING
||
314 preemption_state_
== PREEMPTING
);
315 DCHECK(a_stub_is_descheduled_
);
317 if (preemption_state_
== CHECKING
) {
318 // Stop any pending state update checks that we may have queued
322 // Stop any TransitionToIdle() timers that we may have queued
325 max_preemption_time_
= timer_
.desired_run_time() - base::TimeTicks::Now();
326 if (max_preemption_time_
< base::TimeDelta()) {
332 preemption_state_
= WOULD_PREEMPT_DESCHEDULED
;
333 preempting_flag_
->Reset();
334 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
336 UpdatePreemptionState();
339 static void InsertSyncPointOnMainThread(
340 base::WeakPtr
<GpuChannel
>* gpu_channel
,
341 scoped_refptr
<SyncPointManager
> manager
,
344 // This function must ensure that the sync point will be retired. Normally
345 // we'll find the stub based on the routing ID, and associate the sync point
346 // with it, but if that fails for any reason (channel or stub already
347 // deleted, invalid routing id), we need to retire the sync point
349 if (gpu_channel
->get()) {
350 GpuCommandBufferStub
* stub
= gpu_channel
->get()->LookupCommandBuffer(
353 stub
->AddSyncPoint(sync_point
);
354 GpuCommandBufferMsg_RetireSyncPoint
message(routing_id
, sync_point
);
355 gpu_channel
->get()->OnMessageReceived(message
);
358 gpu_channel
->get()->MessageProcessed();
361 manager
->RetireSyncPoint(sync_point
);
364 static void DeleteWeakPtrOnMainThread(
365 base::WeakPtr
<GpuChannel
>* gpu_channel
) {
369 // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
370 // IO thread, it's only passed through - therefore the WeakPtr assumptions are
372 base::WeakPtr
<GpuChannel
>* gpu_channel_
;
373 IPC::Channel
* channel_
;
374 scoped_refptr
<SyncPointManager
> sync_point_manager_
;
375 scoped_refptr
<base::MessageLoopProxy
> message_loop_
;
376 scoped_refptr
<gpu::PreemptionFlag
> preempting_flag_
;
378 std::queue
<PendingMessage
> pending_messages_
;
380 // Count of the number of IPCs forwarded to the GpuChannel.
381 uint64 messages_forwarded_to_channel_
;
383 base::OneShotTimer
<GpuChannelMessageFilter
> timer_
;
385 bool a_stub_is_descheduled_
;
388 GpuChannel::GpuChannel(GpuChannelManager
* gpu_channel_manager
,
389 GpuWatchdog
* watchdog
,
390 gfx::GLShareGroup
* share_group
,
391 gpu::gles2::MailboxManager
* mailbox
,
394 : gpu_channel_manager_(gpu_channel_manager
),
395 messages_processed_(0),
396 client_id_(client_id
),
397 share_group_(share_group
? share_group
: new gfx::GLShareGroup
),
398 mailbox_manager_(mailbox
? mailbox
: new gpu::gles2::MailboxManager
),
399 image_manager_(new gpu::gles2::ImageManager
),
402 handle_messages_scheduled_(false),
403 currently_processing_message_(NULL
),
405 num_stubs_descheduled_(0) {
406 DCHECK(gpu_channel_manager
);
409 channel_id_
= IPC::Channel::GenerateVerifiedChannelID("gpu");
410 const CommandLine
* command_line
= CommandLine::ForCurrentProcess();
411 log_messages_
= command_line
->HasSwitch(switches::kLogPluginMessages
);
415 void GpuChannel::Init(base::MessageLoopProxy
* io_message_loop
,
416 base::WaitableEvent
* shutdown_event
) {
417 DCHECK(!channel_
.get());
419 // Map renderer ID to a (single) channel to that process.
420 channel_
.reset(new IPC::SyncChannel(
422 IPC::Channel::MODE_SERVER
,
428 base::WeakPtr
<GpuChannel
>* weak_ptr(new base::WeakPtr
<GpuChannel
>(
429 weak_factory_
.GetWeakPtr()));
431 filter_
= new GpuChannelMessageFilter(
433 gpu_channel_manager_
->sync_point_manager(),
434 base::MessageLoopProxy::current());
435 io_message_loop_
= io_message_loop
;
436 channel_
->AddFilter(filter_
.get());
438 devtools_gpu_agent_
.reset(new DevToolsGpuAgent(this));
441 std::string
GpuChannel::GetChannelName() {
445 #if defined(OS_POSIX)
446 int GpuChannel::TakeRendererFileDescriptor() {
451 return channel_
->TakeClientFileDescriptor();
453 #endif // defined(OS_POSIX)
// Entry point for messages arriving from the renderer. Messages are not
// handled here: each one is copied onto |deferred_messages_| and later
// dispatched by HandleMessage().
455 bool GpuChannel::OnMessageReceived(const IPC::Message
& message
) {
457 DVLOG(1) << "received message @" << &message
<< " on channel @" << this
458 << " with type " << message
.type();
461 if (message
.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID
||
462 message
.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID
) {
463 // Move Wait commands to the head of the queue, so the renderer
464 // doesn't have to wait any longer than necessary.
465 deferred_messages_
.push_front(new IPC::Message(message
))
// NOTE(review): original lines 456, 459-460, 466 (presumably the else
// keyword) and 468 onwards (the function tail) are missing from this
// extraction.
;
467 deferred_messages_
.push_back(new IPC::Message(message
));
475 void GpuChannel::OnChannelError() {
476 gpu_channel_manager_
->RemoveChannel(client_id_
);
// Sends |message| to the renderer. Presumably passes ownership of |message|
// to the channel per the IPC::Sender contract - confirm.
479 bool GpuChannel::Send(IPC::Message
* message
) {
480   // The GPU process must never send a synchronous IPC message to the renderer
481   // process. This could result in deadlock.
482   DCHECK(!message
->is_sync());
484   DVLOG(1) << "sending message @" << message
<< " on channel @" << this
485            << " with type " << message
->type()
// NOTE(review): original lines 483 and 486-492 are missing from this
// extraction - presumably a logging guard and a NULL |channel_| check;
// confirm against upstream source.
;
493   return channel_
->Send(message
);
496 void GpuChannel::RequeueMessage() {
497 DCHECK(currently_processing_message_
);
498 deferred_messages_
.push_front(
499 new IPC::Message(*currently_processing_message_
));
500 messages_processed_
--;
501 currently_processing_message_
= NULL
;
// Schedules a HandleMessage() pass on the current message loop unless one is
// already pending; |handle_messages_scheduled_| debounces repeated calls.
504 void GpuChannel::OnScheduled() {
505 if (handle_messages_scheduled_
)
// NOTE(review): the early-return statement (original line 506) and the
// FROM_HERE argument to PostTask (original line 513) are missing from this
// extraction.
507 // Post a task to handle any deferred messages. The deferred message queue is
508 // not emptied here, which ensures that OnMessageReceived will continue to
509 // defer newly received messages until the ones in the queue have all been
510 // handled by HandleMessage. HandleMessage is invoked as a
511 // task to prevent reentrancy.
512 base::MessageLoop::current()->PostTask(
514 base::Bind(&GpuChannel::HandleMessage
, weak_factory_
.GetWeakPtr()));
515 handle_messages_scheduled_
= true;
// Maintains |num_stubs_descheduled_| and, whenever the aggregate "any stub
// is descheduled" bit flips, forwards the new state to the IO-thread message
// filter so it can adjust preemption.
518 void GpuChannel::StubSchedulingChanged(bool scheduled
) {
519 bool a_stub_was_descheduled
= num_stubs_descheduled_
> 0;
// NOTE(review): the if/else selecting between the decrement and increment
// (original lines 520, 522-523, 525) is missing from this extraction -
// presumably |scheduled| chooses the branch; confirm against upstream.
521 num_stubs_descheduled_
--;
524 num_stubs_descheduled_
++;
526 DCHECK_LE(num_stubs_descheduled_
, stubs_
.size());
527 bool a_stub_is_descheduled
= num_stubs_descheduled_
> 0;
529 if (a_stub_is_descheduled
!= a_stub_was_descheduled
) {
530 if (preempting_flag_
.get()) {
531 io_message_loop_
->PostTask(
// NOTE(review): the FROM_HERE argument (original line 532) and the bound
// |filter_| receiver (original line 534) are missing from this extraction.
533 base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState
,
535 a_stub_is_descheduled
));
540 bool GpuChannel::CreateViewCommandBuffer(
541 const gfx::GLSurfaceHandle
& window
,
543 const GPUCreateCommandBufferConfig
& init_params
,
546 "GpuChannel::CreateViewCommandBuffer",
550 GpuCommandBufferStub
* share_group
= stubs_
.Lookup(init_params
.share_group_id
);
552 // Virtualize compositor contexts on OS X to prevent performance regressions
553 // when enabling FCM.
554 // http://crbug.com/180463
555 bool use_virtualized_gl_context
= false;
556 #if defined(OS_MACOSX)
557 use_virtualized_gl_context
= true;
560 scoped_ptr
<GpuCommandBufferStub
> stub(
561 new GpuCommandBufferStub(this,
564 mailbox_manager_
.get(),
565 image_manager_
.get(),
567 disallowed_features_
,
569 init_params
.gpu_preference
,
570 use_virtualized_gl_context
,
575 init_params
.active_url
));
576 if (preempted_flag_
.get())
577 stub
->SetPreemptByFlag(preempted_flag_
);
578 if (!router_
.AddRoute(route_id
, stub
.get())) {
579 DLOG(ERROR
) << "GpuChannel::CreateViewCommandBuffer(): "
580 "failed to add route";
583 stubs_
.AddWithID(stub
.release(), route_id
);
587 GpuCommandBufferStub
* GpuChannel::LookupCommandBuffer(int32 route_id
) {
588 return stubs_
.Lookup(route_id
);
591 void GpuChannel::CreateImage(
592 gfx::PluginWindowHandle window
,
596 "GpuChannel::CreateImage",
602 if (image_manager_
->LookupImage(image_id
)) {
603 LOG(ERROR
) << "CreateImage failed, image_id already in use.";
607 scoped_refptr
<gfx::GLImage
> image
= gfx::GLImage::CreateGLImage(window
);
611 image_manager_
->AddImage(image
.get(), image_id
);
612 *size
= image
->GetSize();
// Removes the image identified by |image_id| from this channel's image
// manager.
615 void GpuChannel::DeleteImage(int32 image_id
) {
// NOTE(review): the opening of the TRACE_EVENT macro invocation (original
// line 616) and its remaining arguments (original lines 618-620) are
// missing from this extraction.
617 "GpuChannel::DeleteImage",
621 image_manager_
->RemoveImage(image_id
);
624 void GpuChannel::LoseAllContexts() {
625 gpu_channel_manager_
->LoseAllContexts();
628 void GpuChannel::MarkAllContextsLost() {
629 for (StubMap::Iterator
<GpuCommandBufferStub
> it(&stubs_
);
630 !it
.IsAtEnd(); it
.Advance()) {
631 it
.GetCurrentValue()->MarkContextLost();
635 void GpuChannel::DestroySoon() {
636 base::MessageLoop::current()->PostTask(
637 FROM_HERE
, base::Bind(&GpuChannel::OnDestroy
, this));
640 bool GpuChannel::AddRoute(int32 route_id
, IPC::Listener
* listener
) {
641 return router_
.AddRoute(route_id
, listener
);
644 void GpuChannel::RemoveRoute(int32 route_id
) {
645 router_
.RemoveRoute(route_id
);
// Lazily creates the flag used to preempt this channel's client and hands
// it, together with the current scheduling state, to the IO-thread message
// filter.
648 gpu::PreemptionFlag
* GpuChannel::GetPreemptionFlag() {
649 if (!preempting_flag_
.get()) {
650 preempting_flag_
= new gpu::PreemptionFlag
;
651 io_message_loop_
->PostTask(
652 FROM_HERE
, base::Bind(
653 &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState
,
654 filter_
, preempting_flag_
, num_stubs_descheduled_
> 0))
// NOTE(review): the closing brace of the if-block (original line 655) is
// missing from this extraction.
;
656 return preempting_flag_
.get();
659 void GpuChannel::SetPreemptByFlag(
660 scoped_refptr
<gpu::PreemptionFlag
> preempted_flag
) {
661 preempted_flag_
= preempted_flag
;
663 for (StubMap::Iterator
<GpuCommandBufferStub
> it(&stubs_
);
664 !it
.IsAtEnd(); it
.Advance()) {
665 it
.GetCurrentValue()->SetPreemptByFlag(preempted_flag_
);
669 GpuChannel::~GpuChannel() {
670 if (preempting_flag_
.get())
671 preempting_flag_
->Reset();
674 void GpuChannel::OnDestroy() {
675 TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
676 gpu_channel_manager_
->RemoveChannel(client_id_
);
679 bool GpuChannel::OnControlMessageReceived(const IPC::Message
& msg
) {
681 IPC_BEGIN_MESSAGE_MAP(GpuChannel
, msg
)
682 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer
,
683 OnCreateOffscreenCommandBuffer
)
684 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer
,
685 OnDestroyCommandBuffer
)
686 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStartEventsRecording
,
687 OnDevToolsStartEventsRecording
)
688 IPC_MESSAGE_HANDLER(GpuChannelMsg_DevToolsStopEventsRecording
,
689 OnDevToolsStopEventsRecording
)
690 IPC_MESSAGE_UNHANDLED(handled
= false)
691 IPC_END_MESSAGE_MAP()
692 DCHECK(handled
) << msg
.type();
696 void GpuChannel::HandleMessage() {
697 handle_messages_scheduled_
= false;
698 if (deferred_messages_
.empty())
701 bool should_fast_track_ack
= false;
702 IPC::Message
* m
= deferred_messages_
.front();
703 GpuCommandBufferStub
* stub
= stubs_
.Lookup(m
->routing_id());
707 if (!stub
->IsScheduled())
709 if (stub
->IsPreempted()) {
715 scoped_ptr
<IPC::Message
> message(m
);
716 deferred_messages_
.pop_front();
717 bool message_processed
= true;
719 currently_processing_message_
= message
.get();
721 if (message
->routing_id() == MSG_ROUTING_CONTROL
)
722 result
= OnControlMessageReceived(*message
);
724 result
= router_
.RouteMessage(*message
);
725 currently_processing_message_
= NULL
;
728 // Respond to sync messages even if router failed to route.
729 if (message
->is_sync()) {
730 IPC::Message
* reply
= IPC::SyncMessage::GenerateReply(&*message
);
731 reply
->set_reply_error();
735 // If the command buffer becomes unscheduled as a result of handling the
736 // message but still has more commands to process, synthesize an IPC
737 // message to flush that command buffer.
739 if (stub
->HasUnprocessedCommands()) {
740 deferred_messages_
.push_front(new GpuCommandBufferMsg_Rescheduled(
742 message_processed
= false;
746 if (message_processed
)
749 // We want the EchoACK following the SwapBuffers to be sent as close as
750 // possible, avoiding scheduling other channels in the meantime.
751 should_fast_track_ack
= false;
752 if (!deferred_messages_
.empty()) {
753 m
= deferred_messages_
.front();
754 stub
= stubs_
.Lookup(m
->routing_id());
755 should_fast_track_ack
=
756 (m
->type() == GpuCommandBufferMsg_Echo::ID
) &&
757 stub
&& stub
->IsScheduled();
759 } while (should_fast_track_ack
);
761 if (!deferred_messages_
.empty()) {
766 void GpuChannel::OnCreateOffscreenCommandBuffer(
767 const gfx::Size
& size
,
768 const GPUCreateCommandBufferConfig
& init_params
,
771 TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
772 GpuCommandBufferStub
* share_group
= stubs_
.Lookup(init_params
.share_group_id
);
774 scoped_ptr
<GpuCommandBufferStub
> stub(new GpuCommandBufferStub(
777 gfx::GLSurfaceHandle(),
778 mailbox_manager_
.get(),
779 image_manager_
.get(),
781 disallowed_features_
,
783 init_params
.gpu_preference
,
789 init_params
.active_url
));
790 if (preempted_flag_
.get())
791 stub
->SetPreemptByFlag(preempted_flag_
);
792 if (!router_
.AddRoute(route_id
, stub
.get())) {
793 DLOG(ERROR
) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
794 "failed to add route";
798 stubs_
.AddWithID(stub
.release(), route_id
);
799 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
800 "route_id", route_id
);
// Tears down the command buffer stub registered under |route_id|.
804 void GpuChannel::OnDestroyCommandBuffer(int32 route_id
) {
805 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
806 "route_id", route_id
);
808 GpuCommandBufferStub
* stub
= stubs_
.Lookup(route_id
)
// NOTE(review): original lines 809-810 are missing from this extraction -
// presumably an early return when |stub| is NULL; confirm against upstream.
;
811 bool need_reschedule
= (stub
&& !stub
->IsScheduled());
812 router_
.RemoveRoute(route_id
);
813 stubs_
.Remove(route_id
);
814 // In case the renderer is currently blocked waiting for a sync reply from the
815 // stub, we need to make sure to reschedule the GpuChannel here.
816 if (need_reschedule
) {
817 // This stub won't get a chance to reschedule, so update the count now.
818 StubSchedulingChanged(true);
822 void GpuChannel::OnDevToolsStartEventsRecording(int32 route_id
,
824 *succeeded
= devtools_gpu_agent_
->StartEventsRecording(route_id
);
827 void GpuChannel::OnDevToolsStopEventsRecording() {
828 devtools_gpu_agent_
->StopEventsRecording();
831 void GpuChannel::MessageProcessed() {
832 messages_processed_
++;
833 if (preempting_flag_
.get()) {
834 io_message_loop_
->PostTask(
836 base::Bind(&GpuChannelMessageFilter::MessageProcessed
,
838 messages_processed_
));
842 void GpuChannel::CacheShader(const std::string
& key
,
843 const std::string
& shader
) {
844 gpu_channel_manager_
->Send(
845 new GpuHostMsg_CacheShader(client_id_
, key
, shader
));
848 void GpuChannel::AddFilter(IPC::MessageFilter
* filter
) {
849 channel_
->AddFilter(filter
);
852 void GpuChannel::RemoveFilter(IPC::MessageFilter
* filter
) {
853 channel_
->RemoveFilter(filter
);
856 uint64
GpuChannel::GetMemoryUsage() {
858 for (StubMap::Iterator
<GpuCommandBufferStub
> it(&stubs_
);
859 !it
.IsAtEnd(); it
.Advance()) {
860 size
+= it
.GetCurrentValue()->GetMemoryUsage();
865 } // namespace content