1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "content/common/gpu/gpu_channel.h"
14 #include "base/bind.h"
15 #include "base/command_line.h"
16 #include "base/message_loop/message_loop_proxy.h"
17 #include "base/stl_util.h"
18 #include "base/strings/string_util.h"
19 #include "base/timer/timer.h"
20 #include "base/trace_event/trace_event.h"
21 #include "content/common/gpu/gpu_channel_manager.h"
22 #include "content/common/gpu/gpu_memory_buffer_factory.h"
23 #include "content/common/gpu/gpu_messages.h"
24 #include "content/public/common/content_switches.h"
25 #include "gpu/command_buffer/common/mailbox.h"
26 #include "gpu/command_buffer/common/value_state.h"
27 #include "gpu/command_buffer/service/gpu_scheduler.h"
28 #include "gpu/command_buffer/service/image_factory.h"
29 #include "gpu/command_buffer/service/mailbox_manager_impl.h"
30 #include "gpu/command_buffer/service/sync_point_manager.h"
31 #include "gpu/command_buffer/service/valuebuffer_manager.h"
32 #include "ipc/ipc_channel.h"
33 #include "ipc/message_filter.h"
34 #include "ui/gl/gl_context.h"
35 #include "ui/gl/gl_image_shared_memory.h"
36 #include "ui/gl/gl_surface.h"
39 #include "ipc/ipc_channel_posix.h"
// Preemption tuning constants (milliseconds, int64). All thresholds are
// expressed as multiples of the vsync interval, per the comment below.
// NOTE(review): the `namespace {` opener is missing from this listing
// (embedded numbering jumps from 39 to 45) — these live in an anonymous
// namespace, closed at "62 }" below.
45 // Number of milliseconds between successive vsync. Many GL commands block
46 // on vsync, so thresholds for preemption should be multiples of this.
47 const int64 kVsyncIntervalMs
= 17;
// ~17 ms corresponds to a 60 Hz refresh rate.
49 // Amount of time that we will wait for an IPC to be processed before
50 // preempting. After a preemption, we must wait this long before triggering
51 // another preemption.
52 const int64 kPreemptWaitTimeMs
= 2 * kVsyncIntervalMs
;
54 // Once we trigger a preemption, the maximum duration that we will wait
55 // before clearing the preemption.
56 const int64 kMaxPreemptTimeMs
= kVsyncIntervalMs
;
58 // Stop the preemption once the time for the longest pending IPC drops
59 // below this threshold.
60 const int64 kStopPreemptThresholdMs
= kVsyncIntervalMs
;
62 } // anonymous namespace
64 // This filter does three things:
65 // - it counts and timestamps each message forwarded to the channel
66 // so that we can preempt other channels if a message takes too long to
67 // process. To guarantee fairness, we must wait a minimum amount of time
68 // before preempting and we limit the amount of time that we can preempt in
69 // one shot (see constants above).
70 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
71 // thread, generating the sync point ID and responding immediately, and then
72 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
73 // into the channel's queue.
74 // - it generates mailbox names for clients of the GPU process on the IO thread.
// NOTE(review): this listing is an elided transcription — the embedded
// original-line numbers have gaps throughout (e.g. 84, 90, 92-95), so
// access specifiers, closing braces, and some statements are missing.
// Comments below describe only what the visible lines establish.
75 class GpuChannelMessageFilter
: public IPC::MessageFilter
{
// Constructor. Stores the (IO-thread-safe, pass-through-only) WeakPtr to the
// owning GpuChannel, the shared SyncPointManager, the main-thread message
// loop proxy, and whether untrusted clients may create future sync points.
77 GpuChannelMessageFilter(
78 base::WeakPtr
<GpuChannel
> gpu_channel
,
79 scoped_refptr
<gpu::SyncPointManager
> sync_point_manager
,
80 scoped_refptr
<base::MessageLoopProxy
> message_loop
,
81 bool future_sync_points
)
82 : preemption_state_(IDLE
),
83 gpu_channel_(gpu_channel
),
85 sync_point_manager_(sync_point_manager
),
86 message_loop_(message_loop
),
87 messages_forwarded_to_channel_(0),
88 a_stub_is_descheduled_(false),
89 future_sync_points_(future_sync_points
) {}
// IPC::MessageFilter override; body elided in this listing (lines 92-95
// missing — presumably stores |sender| into sender_; TODO confirm).
91 void OnFilterAdded(IPC::Sender
* sender
) override
{
// IPC::MessageFilter override; body elided in this listing (lines 97-100
// missing).
96 void OnFilterRemoved() override
{
// Runs on the IO thread. Rejects untrusted RetireSyncPoint messages,
// services InsertSyncPoint synchronously (replying with a freshly generated
// sync point, then posting retirement work to the main thread), and counts/
// timestamps every other message for the preemption state machine.
101 bool OnMessageReceived(const IPC::Message
& message
) override
{
104 bool handled
= false;
// Untrusted clients must not retire sync points directly unless the
// channel was created with future-sync-point support.
105 if ((message
.type() == GpuCommandBufferMsg_RetireSyncPoint::ID
) &&
106 !future_sync_points_
) {
107 DLOG(ERROR
) << "Untrusted client should not send "
108 "GpuCommandBufferMsg_RetireSyncPoint message";
// InsertSyncPoint is answered immediately on the IO thread: generate the
// reply here so the renderer is not blocked on the main thread.
112 if (message
.type() == GpuCommandBufferMsg_InsertSyncPoint::ID
) {
114 IPC::Message
* reply
= IPC::SyncMessage::GenerateReply(&message
);
115 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message
,
117 reply
->set_reply_error();
// NOTE(review): |retire| is read here but its declaration is elided from
// this listing (lines 116, 118-120 missing) — it is the deserialized send
// param tuple; TODO confirm against the full source.
121 if (!future_sync_points_
&& !get
<0>(retire
)) {
122 LOG(ERROR
) << "Untrusted contexts can't create future sync points";
123 reply
->set_reply_error();
127 uint32 sync_point
= sync_point_manager_
->GenerateSyncPoint();
128 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply
, sync_point
);
// Post the retirement bookkeeping to the main thread; the reply has
// already been sent (send site elided from this listing).
130 message_loop_
->PostTask(
132 base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread
,
135 message
.routing_id(),
141 // All other messages get processed by the GpuChannel.
142 messages_forwarded_to_channel_
++;
// Only track per-message timestamps when a preempting flag exists, i.e.
// when some other channel may need to preempt this one.
143 if (preempting_flag_
.get())
144 pending_messages_
.push(PendingMessage(messages_forwarded_to_channel_
));
145 UpdatePreemptionState();
// Called (via post from GpuChannel::MessageProcessed) to drain every pending
// entry whose message number has now been processed by the channel.
150 void MessageProcessed(uint64 messages_processed
) {
151 while (!pending_messages_
.empty() &&
152 pending_messages_
.front().message_number
<= messages_processed
)
153 pending_messages_
.pop();
154 UpdatePreemptionState();
// Installs the preemption flag this filter sets/clears, plus the initial
// descheduled state. Called once, posted from GpuChannel::GetPreemptionFlag.
157 void SetPreemptingFlagAndSchedulingState(
158 gpu::PreemptionFlag
* preempting_flag
,
159 bool a_stub_is_descheduled
) {
160 preempting_flag_
= preempting_flag
;
161 a_stub_is_descheduled_
= a_stub_is_descheduled
;
// Posted from GpuChannel::StubSchedulingChanged whenever the "any stub
// descheduled" bit flips; re-evaluates the preemption state machine.
164 void UpdateStubSchedulingState(bool a_stub_is_descheduled
) {
165 a_stub_is_descheduled_
= a_stub_is_descheduled
;
166 UpdatePreemptionState();
// Forwards a message through the sender captured in OnFilterAdded.
169 bool Send(IPC::Message
* message
) {
170 return sender_
->Send(message
);
174 ~GpuChannelMessageFilter() override
{}
// State machine for deciding when to raise/clear preempting_flag_.
// NOTE(review): the enumerator names IDLE, WAITING, CHECKING and PREEMPTING
// are elided from this listing (only WOULD_PREEMPT_DESCHEDULED survives);
// they are referenced by name throughout the methods below.
177 enum PreemptionState
{
178 // Either there's no other channel to preempt, there are no messages
179 // pending processing, or we just finished preempting and have to wait
180 // before preempting again.
182 // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
184 // We can preempt whenever any IPC processing takes more than
185 // kPreemptWaitTimeMs.
187 // We are currently preempting (i.e. no stub is descheduled).
189 // We would like to preempt, but some stub is descheduled.
190 WOULD_PREEMPT_DESCHEDULED
,
193 PreemptionState preemption_state_
;
195 // Maximum amount of time that we can spend in PREEMPTING.
196 // It is reset when we transition to IDLE.
197 base::TimeDelta max_preemption_time_
;
// A forwarded message's sequence number plus its IO-thread arrival time,
// used to measure how long the channel is taking to process it.
199 struct PendingMessage
{
200 uint64 message_number
;
201 base::TimeTicks time_received
;
203 explicit PendingMessage(uint64 message_number
)
204 : message_number(message_number
),
205 time_received(base::TimeTicks::Now()) {
// Central dispatcher of the preemption state machine; every event funnels
// through here. Case labels for IDLE/WAITING/CHECKING/PREEMPTING are elided
// from this listing but implied by the surviving comments and transitions.
209 void UpdatePreemptionState() {
210 switch (preemption_state_
) {
// IDLE: arm the wait timer as soon as there is both a preempting flag and
// at least one pending message.
212 if (preempting_flag_
.get() && !pending_messages_
.empty())
213 TransitionToWaiting();
216 // A timer will transition us to CHECKING.
217 DCHECK(timer_
.IsRunning());
// CHECKING: preempt once the oldest pending IPC has been outstanding for
// kPreemptWaitTimeMs; otherwise schedule a re-check for that deadline.
220 if (!pending_messages_
.empty()) {
221 base::TimeDelta time_elapsed
=
222 base::TimeTicks::Now() - pending_messages_
.front().time_received
;
223 if (time_elapsed
.InMilliseconds() < kPreemptWaitTimeMs
) {
224 // Schedule another check for when the IPC may go long.
227 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs
) -
229 this, &GpuChannelMessageFilter::UpdatePreemptionState
);
231 if (a_stub_is_descheduled_
)
232 TransitionToWouldPreemptDescheduled();
234 TransitionToPreempting();
239 // A TransitionToIdle() timer should always be running in this state.
240 DCHECK(timer_
.IsRunning());
241 if (a_stub_is_descheduled_
)
242 TransitionToWouldPreemptDescheduled();
244 TransitionToIdleIfCaughtUp();
246 case WOULD_PREEMPT_DESCHEDULED
:
247 // A TransitionToIdle() timer should never be running in this state.
248 DCHECK(!timer_
.IsRunning());
249 if (!a_stub_is_descheduled_
)
250 TransitionToPreempting();
252 TransitionToIdleIfCaughtUp();
// Leaves PREEMPTING/WOULD_PREEMPT_DESCHEDULED once the channel has caught
// up: queue empty, or oldest pending IPC under kStopPreemptThresholdMs.
259 void TransitionToIdleIfCaughtUp() {
260 DCHECK(preemption_state_
== PREEMPTING
||
261 preemption_state_
== WOULD_PREEMPT_DESCHEDULED
);
262 if (pending_messages_
.empty()) {
265 base::TimeDelta time_elapsed
=
266 base::TimeTicks::Now() - pending_messages_
.front().time_received
;
267 if (time_elapsed
.InMilliseconds() < kStopPreemptThresholdMs
)
// Clears the preempting flag and returns to IDLE, then immediately
// re-evaluates (a new backlog can re-arm the wait timer right away).
272 void TransitionToIdle() {
273 DCHECK(preemption_state_
== PREEMPTING
||
274 preemption_state_
== WOULD_PREEMPT_DESCHEDULED
);
275 // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
278 preemption_state_
= IDLE
;
279 preempting_flag_
->Reset();
280 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
282 UpdatePreemptionState();
// IDLE -> WAITING: start the fairness delay before we may check for
// long-running IPCs.
285 void TransitionToWaiting() {
286 DCHECK_EQ(preemption_state_
, IDLE
);
287 DCHECK(!timer_
.IsRunning());
289 preemption_state_
= WAITING
;
292 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs
),
293 this, &GpuChannelMessageFilter::TransitionToChecking
);
// WAITING -> CHECKING (timer callback): refill the preemption budget and
// immediately evaluate whether to preempt.
296 void TransitionToChecking() {
297 DCHECK_EQ(preemption_state_
, WAITING
);
298 DCHECK(!timer_
.IsRunning());
300 preemption_state_
= CHECKING
;
301 max_preemption_time_
= base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs
);
302 UpdatePreemptionState();
// Raises the preempting flag and arms a timer bounding how long we stay in
// PREEMPTING (TransitionToIdle after max_preemption_time_).
305 void TransitionToPreempting() {
306 DCHECK(preemption_state_
== CHECKING
||
307 preemption_state_
== WOULD_PREEMPT_DESCHEDULED
);
308 DCHECK(!a_stub_is_descheduled_
);
310 // Stop any pending state update checks that we may have queued
312 if (preemption_state_
== CHECKING
)
315 preemption_state_
= PREEMPTING
;
316 preempting_flag_
->Set();
317 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);
321 max_preemption_time_
,
322 this, &GpuChannelMessageFilter::TransitionToIdle
);
324 UpdatePreemptionState();
// A stub got descheduled while we were checking/preempting: stop preempting
// (clear the flag) but remember the remaining budget so we can resume when
// every stub is scheduled again.
327 void TransitionToWouldPreemptDescheduled() {
328 DCHECK(preemption_state_
== CHECKING
||
329 preemption_state_
== PREEMPTING
);
330 DCHECK(a_stub_is_descheduled_
);
332 if (preemption_state_
== CHECKING
) {
333 // Stop any pending state update checks that we may have queued
337 // Stop any TransitionToIdle() timers that we may have queued
// Bank the unexpired portion of the preemption budget.
340 max_preemption_time_
= timer_
.desired_run_time() - base::TimeTicks::Now();
341 if (max_preemption_time_
< base::TimeDelta()) {
347 preemption_state_
= WOULD_PREEMPT_DESCHEDULED
;
348 preempting_flag_
->Reset();
349 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
351 UpdatePreemptionState();
// Main-thread continuation of the IO-thread InsertSyncPoint handling.
// NOTE(review): the |routing_id|/|sync_point| parameters and the
// control-flow lines around the fallback path are elided from this listing.
354 static void InsertSyncPointOnMainThread(
355 base::WeakPtr
<GpuChannel
> gpu_channel
,
356 scoped_refptr
<gpu::SyncPointManager
> manager
,
360 // This function must ensure that the sync point will be retired. Normally
361 // we'll find the stub based on the routing ID, and associate the sync point
362 // with it, but if that fails for any reason (channel or stub already
363 // deleted, invalid routing id), we need to retire the sync point
366 GpuCommandBufferStub
* stub
= gpu_channel
->LookupCommandBuffer(routing_id
);
368 stub
->AddSyncPoint(sync_point
);
// Inject a synthetic RetireSyncPoint into the channel's ordered queue so
// retirement happens in message order relative to other IPCs.
370 GpuCommandBufferMsg_RetireSyncPoint
message(routing_id
, sync_point
);
371 gpu_channel
->OnMessageReceived(message
);
375 gpu_channel
->MessageProcessed();
// Fallback: retire directly so waiters are never stranded.
378 manager
->RetireSyncPoint(sync_point
);
381 // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
382 // passed through - therefore the WeakPtr assumptions are respected.
383 base::WeakPtr
<GpuChannel
> gpu_channel_
;
// Sender captured from OnFilterAdded; non-owning.
384 IPC::Sender
* sender_
;
385 scoped_refptr
<gpu::SyncPointManager
> sync_point_manager_
;
// Main-thread loop proxy used to post work off the IO thread.
386 scoped_refptr
<base::MessageLoopProxy
> message_loop_
;
387 scoped_refptr
<gpu::PreemptionFlag
> preempting_flag_
;
// FIFO of forwarded-but-not-yet-processed messages (IO-thread only).
389 std::queue
<PendingMessage
> pending_messages_
;
391 // Count of the number of IPCs forwarded to the GpuChannel.
392 uint64 messages_forwarded_to_channel_
;
// Drives WAITING->CHECKING and PREEMPTING->IDLE transitions.
394 base::OneShotTimer
<GpuChannelMessageFilter
> timer_
;
396 bool a_stub_is_descheduled_
;
398 // True if this channel can create future sync points.
399 bool future_sync_points_
;
// Constructor. Wires the channel to its owning GpuChannelManager, adopts or
// creates the GL share group and mailbox manager, generates a verified IPC
// channel ID, and registers as an observer of the subscription ref set.
// NOTE(review): several parameters (e.g. client_id, per the initializer at
// line 411) and some initializer-list entries are elided from this listing.
402 GpuChannel::GpuChannel(GpuChannelManager
* gpu_channel_manager
,
403 GpuWatchdog
* watchdog
,
404 gfx::GLShareGroup
* share_group
,
405 gpu::gles2::MailboxManager
* mailbox
,
408 bool allow_future_sync_points
)
409 : gpu_channel_manager_(gpu_channel_manager
),
410 messages_processed_(0),
411 client_id_(client_id
),
412 share_group_(share_group
? share_group
: new gfx::GLShareGroup
),
413 mailbox_manager_(mailbox
? mailbox
: new gpu::gles2::MailboxManagerImpl
),
416 handle_messages_scheduled_(false),
417 currently_processing_message_(NULL
),
418 num_stubs_descheduled_(0),
419 allow_future_sync_points_(allow_future_sync_points
),
420 weak_factory_(this) {
421 DCHECK(gpu_channel_manager
);
424 channel_id_
= IPC::Channel::GenerateVerifiedChannelID("gpu");
425 const base::CommandLine
* command_line
=
426 base::CommandLine::ForCurrentProcess();
427 log_messages_
= command_line
->HasSwitch(switches::kLogPluginMessages
);
429 subscription_ref_set_
= new gpu::gles2::SubscriptionRefSet();
430 subscription_ref_set_
->AddObserver(this);
// Destructor: frees any still-queued deferred messages (raw owning pointers
// in deferred_messages_), unregisters from the subscription ref set, and
// clears the preempting flag so no other channel stays preempted by us.
433 GpuChannel::~GpuChannel() {
434 STLDeleteElements(&deferred_messages_
);
435 subscription_ref_set_
->RemoveObserver(this);
436 if (preempting_flag_
.get())
437 preempting_flag_
->Reset();
// Creates the IPC SyncChannel (server mode) for this renderer, installs the
// GpuChannelMessageFilter on the IO thread, and sets up value-buffer state.
// NOTE(review): the SyncChannel::Create argument list and the filter_
// assignment are partially elided in this listing (original lines 447-452
// missing).
440 void GpuChannel::Init(base::MessageLoopProxy
* io_message_loop
,
441 base::WaitableEvent
* shutdown_event
) {
442 DCHECK(!channel_
.get());
444 // Map renderer ID to a (single) channel to that process.
445 channel_
= IPC::SyncChannel::Create(channel_id_
,
446 IPC::Channel::MODE_SERVER
,
453 new GpuChannelMessageFilter(weak_factory_
.GetWeakPtr(),
454 gpu_channel_manager_
->sync_point_manager(),
455 base::MessageLoopProxy::current(),
456 allow_future_sync_points_
);
457 io_message_loop_
= io_message_loop
;
458 channel_
->AddFilter(filter_
.get());
459 pending_valuebuffer_state_
= new gpu::ValueStateMap();
// Returns this channel's name; body elided in this listing (original lines
// 463-464 missing — presumably returns channel_id_; TODO confirm).
462 std::string
GpuChannel::GetChannelName() {
// POSIX only: transfers ownership of the client-side channel FD to the
// caller. Returns an empty ScopedFD in the early-exit branch (its guard
// condition is elided in this listing — original lines 468-469 missing).
466 #if defined(OS_POSIX)
467 base::ScopedFD
GpuChannel::TakeRendererFileDescriptor() {
470 return base::ScopedFD();
472 return channel_
->TakeClientFileDescriptor();
474 #endif // defined(OS_POSIX)
// Main-thread IPC entry point. Every message is deferred (copied onto
// deferred_messages_) rather than handled inline; Wait* messages jump the
// queue so a blocked renderer resumes as soon as possible.
476 bool GpuChannel::OnMessageReceived(const IPC::Message
& message
) {
478 DVLOG(1) << "received message @" << &message
<< " on channel @" << this
479 << " with type " << message
.type();
482 if (message
.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID
||
483 message
.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID
) {
484 // Move Wait commands to the head of the queue, so the renderer
485 // doesn't have to wait any longer than necessary.
486 deferred_messages_
.push_front(new IPC::Message(message
));
// All other messages keep arrival order at the back of the queue.
488 deferred_messages_
.push_back(new IPC::Message(message
));
// Channel error (e.g. renderer died): ask the manager to tear us down.
// RemoveChannel deletes this object, so nothing may run after it.
496 void GpuChannel::OnChannelError() {
497 gpu_channel_manager_
->RemoveChannel(client_id_
);
// Sends a message to the renderer, taking ownership of |message|.
500 bool GpuChannel::Send(IPC::Message
* message
) {
501 // The GPU process must never send a synchronous IPC message to the renderer
502 // process. This could result in deadlock.
503 DCHECK(!message
->is_sync());
505 DVLOG(1) << "sending message @" << message
<< " on channel @" << this
506 << " with type " << message
->type();
// NOTE(review): a null-channel_ guard likely exists in the elided lines
// (507-513 missing from this listing) — confirm before relying on this
// being safe post-teardown.
514 return channel_
->Send(message
);
// SubscriptionRefSet::Observer: forward the new subscription target to the
// browser process, tagged with this channel's client id.
517 void GpuChannel::OnAddSubscription(unsigned int target
) {
518 gpu_channel_manager()->Send(
519 new GpuHostMsg_AddSubscription(client_id_
, target
));
// SubscriptionRefSet::Observer: mirror of OnAddSubscription for removal.
522 void GpuChannel::OnRemoveSubscription(unsigned int target
) {
523 gpu_channel_manager()->Send(
524 new GpuHostMsg_RemoveSubscription(client_id_
, target
));
// Puts the message currently being processed back at the head of the queue
// (e.g. the stub could not complete it) and rolls back the processed count
// so the IO-thread filter's preemption bookkeeping stays consistent.
527 void GpuChannel::RequeueMessage() {
528 DCHECK(currently_processing_message_
);
529 deferred_messages_
.push_front(
530 new IPC::Message(*currently_processing_message_
));
531 messages_processed_
--;
532 currently_processing_message_
= NULL
;
// Schedules (at most one) HandleMessage task to drain deferred messages;
// handle_messages_scheduled_ dedupes repeated calls.
535 void GpuChannel::OnScheduled() {
536 if (handle_messages_scheduled_
)
538 // Post a task to handle any deferred messages. The deferred message queue is
539 // not emptied here, which ensures that OnMessageReceived will continue to
540 // defer newly received messages until the ones in the queue have all been
541 // handled by HandleMessage. HandleMessage is invoked as a
542 // task to prevent reentrancy.
543 base::MessageLoop::current()->PostTask(
545 base::Bind(&GpuChannel::HandleMessage
, weak_factory_
.GetWeakPtr()));
546 handle_messages_scheduled_
= true;
// Tracks how many stubs are descheduled and, when the any-descheduled bit
// flips, notifies the IO-thread filter so it can pause/resume preemption.
// NOTE(review): the if/else around the ++/-- and the tail of the function
// (original lines 551, 553-554, 559, 563, 565, 567+ ) are elided here.
549 void GpuChannel::StubSchedulingChanged(bool scheduled
) {
550 bool a_stub_was_descheduled
= num_stubs_descheduled_
> 0;
552 num_stubs_descheduled_
--;
555 num_stubs_descheduled_
++;
557 DCHECK_LE(num_stubs_descheduled_
, stubs_
.size());
558 bool a_stub_is_descheduled
= num_stubs_descheduled_
> 0;
560 if (a_stub_is_descheduled
!= a_stub_was_descheduled
) {
561 if (preempting_flag_
.get()) {
// Filter state lives on the IO thread, so the update must be posted.
562 io_message_loop_
->PostTask(
564 base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState
,
566 a_stub_is_descheduled
));
// Creates a command buffer stub bound to an onscreen surface, registers it
// with the message router under |route_id|, and reports success/failure to
// the caller. NOTE(review): several parameters (route_id, per its use at
// line 610) and stub-constructor arguments are elided from this listing.
571 CreateCommandBufferResult
GpuChannel::CreateViewCommandBuffer(
572 const gfx::GLSurfaceHandle
& window
,
574 const GPUCreateCommandBufferConfig
& init_params
,
577 "GpuChannel::CreateViewCommandBuffer",
// Resolve the share-group stub (may be null when not sharing).
581 GpuCommandBufferStub
* share_group
= stubs_
.Lookup(init_params
.share_group_id
);
583 // Virtualize compositor contexts on OS X to prevent performance regressions
584 // when enabling FCM.
585 // http://crbug.com/180463
586 bool use_virtualized_gl_context
= false;
587 #if defined(OS_MACOSX)
588 use_virtualized_gl_context
= true;
591 scoped_ptr
<GpuCommandBufferStub
> stub(
592 new GpuCommandBufferStub(this,
595 mailbox_manager_
.get(),
596 subscription_ref_set_
.get(),
597 pending_valuebuffer_state_
.get(),
599 disallowed_features_
,
601 init_params
.gpu_preference
,
602 use_virtualized_gl_context
,
607 init_params
.active_url
));
// If another channel already preempts us, propagate that to the new stub.
608 if (preempted_flag_
.get())
609 stub
->SetPreemptByFlag(preempted_flag_
);
610 if (!router_
.AddRoute(route_id
, stub
.get())) {
611 DLOG(ERROR
) << "GpuChannel::CreateViewCommandBuffer(): "
612 "failed to add route";
613 return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST
;
// stubs_ takes ownership of the stub on success.
615 stubs_
.AddWithID(stub
.release(), route_id
);
616 return CREATE_COMMAND_BUFFER_SUCCEEDED
;
// Returns the stub registered for |route_id|, or null; non-owning.
619 GpuCommandBufferStub
* GpuChannel::LookupCommandBuffer(int32 route_id
) {
620 return stubs_
.Lookup(route_id
);
// Delegates context loss for every channel to the manager.
623 void GpuChannel::LoseAllContexts() {
624 gpu_channel_manager_
->LoseAllContexts();
// Marks the context of every stub on this channel as lost.
627 void GpuChannel::MarkAllContextsLost() {
628 for (StubMap::Iterator
<GpuCommandBufferStub
> it(&stubs_
);
629 !it
.IsAtEnd(); it
.Advance()) {
630 it
.GetCurrentValue()->MarkContextLost();
// Registers |listener| with the message router; returns false on conflict.
634 bool GpuChannel::AddRoute(int32 route_id
, IPC::Listener
* listener
) {
635 return router_
.AddRoute(route_id
, listener
);
// Unregisters |route_id| from the message router.
638 void GpuChannel::RemoveRoute(int32 route_id
) {
639 router_
.RemoveRoute(route_id
);
// Lazily creates the flag this channel's filter raises to preempt others,
// and posts the flag plus the current scheduling state to the IO thread so
// the filter can start driving it.
642 gpu::PreemptionFlag
* GpuChannel::GetPreemptionFlag() {
643 if (!preempting_flag_
.get()) {
644 preempting_flag_
= new gpu::PreemptionFlag
;
645 io_message_loop_
->PostTask(
646 FROM_HERE
, base::Bind(
647 &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState
,
648 filter_
, preempting_flag_
, num_stubs_descheduled_
> 0));
650 return preempting_flag_
.get();
// Installs the flag by which THIS channel gets preempted, and pushes it to
// every existing stub (new stubs pick it up at creation time).
653 void GpuChannel::SetPreemptByFlag(
654 scoped_refptr
<gpu::PreemptionFlag
> preempted_flag
) {
655 preempted_flag_
= preempted_flag
;
657 for (StubMap::Iterator
<GpuCommandBufferStub
> it(&stubs_
);
658 !it
.IsAtEnd(); it
.Advance()) {
659 it
.GetCurrentValue()->SetPreemptByFlag(preempted_flag_
);
// IPC destroy request: hand teardown to the manager. RemoveChannel deletes
// this object, so nothing may run after it.
663 void GpuChannel::OnDestroy() {
664 TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
665 gpu_channel_manager_
->RemoveChannel(client_id_
);
// Dispatches MSG_ROUTING_CONTROL messages (channel-level, not per-stub).
// Every control message must be handled — hence the DCHECK after the map.
// NOTE(review): the declaration of |handled| (original line 669) is elided
// from this listing.
668 bool GpuChannel::OnControlMessageReceived(const IPC::Message
& msg
) {
670 IPC_BEGIN_MESSAGE_MAP(GpuChannel
, msg
)
671 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer
,
672 OnCreateOffscreenCommandBuffer
)
673 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer
,
674 OnDestroyCommandBuffer
)
675 IPC_MESSAGE_UNHANDLED(handled
= false)
676 IPC_END_MESSAGE_MAP()
677 DCHECK(handled
) << msg
.type();
// Drains one message from the deferred queue: routes it to its stub (or to
// OnControlMessageReceived for control messages), error-replies failed sync
// messages, and synthesizes a Rescheduled message when the stub still has
// unprocessed commands. NOTE(review): the early-return guards, the
// MessageProcessed()/OnScheduled() tail, and several intermediate lines are
// elided from this listing (embedded numbering gaps).
681 void GpuChannel::HandleMessage() {
682 handle_messages_scheduled_
= false;
683 if (deferred_messages_
.empty())
686 IPC::Message
* m
= NULL
;
687 GpuCommandBufferStub
* stub
= NULL
;
689 m
= deferred_messages_
.front();
690 stub
= stubs_
.Lookup(m
->routing_id());
// A descheduled or preempted stub cannot take the message now; the elided
// branches bail out (stub will call OnScheduled when ready again).
692 if (!stub
->IsScheduled())
694 if (stub
->IsPreempted()) {
// From here on the message is owned by |message| and removed from the queue.
700 scoped_ptr
<IPC::Message
> message(m
);
701 deferred_messages_
.pop_front();
702 bool message_processed
= true;
// Track the in-flight message so RequeueMessage() can restore it.
704 currently_processing_message_
= message
.get();
706 if (message
->routing_id() == MSG_ROUTING_CONTROL
)
707 result
= OnControlMessageReceived(*message
);
709 result
= router_
.RouteMessage(*message
);
710 currently_processing_message_
= NULL
;
713 // Respond to sync messages even if router failed to route.
714 if (message
->is_sync()) {
715 IPC::Message
* reply
= IPC::SyncMessage::GenerateReply(&*message
);
716 reply
->set_reply_error();
720 // If the command buffer becomes unscheduled as a result of handling the
721 // message but still has more commands to process, synthesize an IPC
722 // message to flush that command buffer.
724 if (stub
->HasUnprocessedCommands()) {
725 deferred_messages_
.push_front(new GpuCommandBufferMsg_Rescheduled(
727 message_processed
= false;
731 if (message_processed
)
// More work queued: reschedule ourselves (elided tail calls OnScheduled).
734 if (!deferred_messages_
.empty()) {
// Control-message handler: creates an offscreen command buffer stub (empty
// GLSurfaceHandle) and registers it under |route_id|. Mirrors
// CreateViewCommandBuffer minus the window/virtualization handling.
// NOTE(review): the route_id/succeeded out-params and some stub-constructor
// arguments are elided from this listing.
739 void GpuChannel::OnCreateOffscreenCommandBuffer(
740 const gfx::Size
& size
,
741 const GPUCreateCommandBufferConfig
& init_params
,
744 TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
745 GpuCommandBufferStub
* share_group
= stubs_
.Lookup(init_params
.share_group_id
);
747 scoped_ptr
<GpuCommandBufferStub
> stub(new GpuCommandBufferStub(
750 gfx::GLSurfaceHandle(),
751 mailbox_manager_
.get(),
752 subscription_ref_set_
.get(),
753 pending_valuebuffer_state_
.get(),
755 disallowed_features_
,
757 init_params
.gpu_preference
,
763 init_params
.active_url
));
764 if (preempted_flag_
.get())
765 stub
->SetPreemptByFlag(preempted_flag_
);
766 if (!router_
.AddRoute(route_id
, stub
.get())) {
767 DLOG(ERROR
) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
768 "failed to add route";
// stubs_ takes ownership on success.
772 stubs_
.AddWithID(stub
.release(), route_id
);
773 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
774 "route_id", route_id
);
// Control-message handler: removes and destroys the stub for |route_id|,
// rescheduling the channel if the destroyed stub was the one blocking it.
778 void GpuChannel::OnDestroyCommandBuffer(int32 route_id
) {
779 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
780 "route_id", route_id
);
782 GpuCommandBufferStub
* stub
= stubs_
.Lookup(route_id
);
// Capture scheduling state before Remove() destroys the stub.
785 bool need_reschedule
= (stub
&& !stub
->IsScheduled());
786 router_
.RemoveRoute(route_id
);
787 stubs_
.Remove(route_id
);
788 // In case the renderer is currently blocked waiting for a sync reply from the
789 // stub, we need to make sure to reschedule the GpuChannel here.
790 if (need_reschedule
) {
791 // This stub won't get a chance to reschedule, so update the count now.
792 StubSchedulingChanged(true);
// Bumps the processed-message counter and mirrors it to the IO-thread
// filter so its pending-message queue (preemption timing) can be drained.
796 void GpuChannel::MessageProcessed() {
797 messages_processed_
++;
798 if (preempting_flag_
.get()) {
799 io_message_loop_
->PostTask(
801 base::Bind(&GpuChannelMessageFilter::MessageProcessed
,
803 messages_processed_
));
// Forwards a compiled shader to the browser's shader disk cache, keyed by
// this channel's client id.
807 void GpuChannel::CacheShader(const std::string
& key
,
808 const std::string
& shader
) {
809 gpu_channel_manager_
->Send(
810 new GpuHostMsg_CacheShader(client_id_
, key
, shader
));
// Adds an extra message filter to the underlying IPC channel.
813 void GpuChannel::AddFilter(IPC::MessageFilter
* filter
) {
814 channel_
->AddFilter(filter
);
// Removes a previously added message filter from the IPC channel.
817 void GpuChannel::RemoveFilter(IPC::MessageFilter
* filter
) {
818 channel_
->RemoveFilter(filter
);
// Sums memory usage across all stubs on this channel.
// NOTE(review): |size|'s declaration and the return statement are elided
// from this listing (original lines 822, 827+ missing).
821 uint64
GpuChannel::GetMemoryUsage() {
823 for (StubMap::Iterator
<GpuCommandBufferStub
> it(&stubs_
);
824 !it
.IsAtEnd(); it
.Advance()) {
825 size
+= it
.GetCurrentValue()->GetMemoryUsage();
// Wraps a GpuMemoryBuffer handle in a GLImage. Shared-memory handles are
// serviced locally via GLImageSharedMemory; every other handle type falls
// through to the manager's GpuMemoryBufferFactory. Returns null on failure.
830 scoped_refptr
<gfx::GLImage
> GpuChannel::CreateImageForGpuMemoryBuffer(
831 const gfx::GpuMemoryBufferHandle
& handle
,
832 const gfx::Size
& size
,
833 gfx::GpuMemoryBuffer::Format format
,
834 uint32 internalformat
) {
835 switch (handle
.type
) {
836 case gfx::SHARED_MEMORY_BUFFER
: {
837 scoped_refptr
<gfx::GLImageSharedMemory
> image(
838 new gfx::GLImageSharedMemory(size
, internalformat
));
839 if (!image
->Initialize(handle
, format
))
840 return scoped_refptr
<gfx::GLImage
>();
// Default case (label elided in this listing): delegate to the factory.
845 GpuChannelManager
* manager
= gpu_channel_manager();
846 if (!manager
->gpu_memory_buffer_factory())
847 return scoped_refptr
<gfx::GLImage
>();
849 return manager
->gpu_memory_buffer_factory()
851 ->CreateImageForGpuMemoryBuffer(handle
,
// Records a pending valuebuffer state update for |target|; stubs consume
// pending_valuebuffer_state_ when processing commands.
860 void GpuChannel::HandleUpdateValueState(
861 unsigned int target
, const gpu::ValueState
& state
) {
862 pending_valuebuffer_state_
->UpdateState(target
, state
);
865 } // namespace content