// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_channel.h"

#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/common/gpu/sync_point_manager.h"
#include "content/public/common/content_switches.h"
#include "crypto/hmac.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/ipc_channel_proxy.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"
#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif
#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_manager_android.h"
#endif
namespace content {
namespace {

// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
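
// With kVsyncIntervalMs at 17 ms, preemption therefore kicks in only after an
// IPC has been pending for 34 ms, and is cleared after at most another 17 ms.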

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO
//   thread.
class GpuChannelMessageFilter : public IPC::ChannelProxy::MessageFilter {
 public:
  // Takes ownership of gpu_channel (see below).
  GpuChannelMessageFilter(const std::string& private_key,
                          base::WeakPtr<GpuChannel>* gpu_channel,
                          scoped_refptr<SyncPointManager> sync_point_manager,
                          scoped_refptr<base::MessageLoopProxy> message_loop)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        channel_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        hmac_(crypto::HMAC::SHA256) {
    bool success = hmac_.Init(base::StringPiece(private_key));
    DCHECK(success);
  }

  virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
    DCHECK(!channel_);
    channel_ = channel;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    DCHECK(channel_);
    channel_ = NULL;
  }

  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE {
    DCHECK(channel_);
    bool handled = true;
    IPC_BEGIN_MESSAGE_MAP(GpuChannelMessageFilter, message)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNames,
                          OnGenerateMailboxNames)
      IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesAsync,
                          OnGenerateMailboxNamesAsync)
      IPC_MESSAGE_UNHANDLED(handled = false)
    IPC_END_MESSAGE_MAP()

    if (message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) {
      // This message should not be sent explicitly by the renderer.
      NOTREACHED();
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    if (!handled) {
      messages_forwarded_to_channel_++;
      if (preempting_flag_.get())
        pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
      UpdatePreemptionState();
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(FROM_HERE, base::Bind(
          &GpuChannelMessageFilter::InsertSyncPointOnMainThread,
          gpu_channel_,
          sync_point_manager_,
          message.routing_id(),
          sync_point));
      handled = true;
    }

    return handled;
  }

  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }
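
  // Note on MessageProcessed(): the GPU main thread increments its own
  // messages_processed_ counter after handling each IPC and posts the new
  // value back here (see GpuChannel::MessageProcessed below), so
  // pending_messages_ always holds exactly the forwarded-but-not-yet-handled
  // messages whose age drives the preemption decision.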

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return channel_->Send(message);
  }

 protected:
  virtual ~GpuChannelMessageFilter() {
    message_loop_->PostTask(FROM_HERE, base::Bind(
        &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
  }

 private:
  void OnGenerateMailboxNames(unsigned num, std::vector<gpu::Mailbox>* result) {
    TRACE_EVENT1("gpu", "OnGenerateMailboxNames", "num", num);
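    // Each name is GL_MAILBOX_SIZE_CHROMIUM bytes: the first half is random
    // bytes and the second half is an HMAC-SHA256 signature of that first
    // half, computed with the channel's private key, so that (presumably on
    // the receiving side) names minted here can be distinguished from names a
    // client simply made up.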
    result->resize(num);

    for (unsigned i = 0; i < num; ++i) {
      char name[GL_MAILBOX_SIZE_CHROMIUM];
      base::RandBytes(name, sizeof(name) / 2);

      bool success = hmac_.Sign(
          base::StringPiece(name, sizeof(name) / 2),
          reinterpret_cast<unsigned char*>(name) + sizeof(name) / 2,
          sizeof(name) / 2);
      DCHECK(success);

      (*result)[i].SetName(reinterpret_cast<int8*>(name));
    }
  }

  void OnGenerateMailboxNamesAsync(unsigned num) {
    std::vector<gpu::Mailbox> names;
    OnGenerateMailboxNames(num, &names);
    Send(new GpuChannelMsg_GenerateMailboxNamesReply(names));
  }

  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };
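
  // Transitions, as implemented by the TransitionTo*() methods below:
  //
  //   IDLE -> WAITING          preempting_flag_ set and messages pending
  //   WAITING -> CHECKING      after kPreemptWaitTimeMs
  //   CHECKING -> PREEMPTING   oldest pending IPC older than
  //                            kPreemptWaitTimeMs, no stub descheduled
  //   PREEMPTING -> IDLE       caught up, or kMaxPreemptTimeMs exhausted
  //   CHECKING/PREEMPTING <-> WOULD_PREEMPT_DESCHEDULED
  //                            as stubs are descheduled/rescheduled
  //   WOULD_PREEMPT_DESCHEDULED -> IDLE   caught up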
  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel,
      scoped_refptr<SyncPointManager> manager,
      int32 routing_id,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel->get()) {
      GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
          routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
        gpu_channel->get()->OnMessageReceived(message);
        return;
      }
      gpu_channel->get()->MessageProcessed();
    }
    manager->RetireSyncPoint(sync_point);
  }

  static void DeleteWeakPtrOnMainThread(
      base::WeakPtr<GpuChannel>* gpu_channel) {
    delete gpu_channel;
  }

  // NOTE: this is a pointer to a weak pointer. It is never dereferenced on
  // the IO thread, it's only passed through - therefore the WeakPtr
  // assumptions are respected.
  base::WeakPtr<GpuChannel>* gpu_channel_;
  IPC::Channel* channel_;
  scoped_refptr<SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  crypto::HMAC hmac_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManager),
      image_manager_(new gpu::gles2::ImageManager),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      processed_get_state_fast_(false),
      currently_processing_message_(NULL),
      weak_factory_(this),
      num_stubs_descheduled_(0) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const CommandLine* command_line = CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
  disallowed_features_.multisampling =
      command_line->HasSwitch(switches::kDisableGLMultisampling);
#if defined(OS_ANDROID)
  stream_texture_manager_.reset(new StreamTextureManagerAndroid(this));
#endif
}

bool GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_.reset(new IPC::SyncChannel(
      channel_id_,
      IPC::Channel::MODE_SERVER,
      this,
      io_message_loop,
      false,
      shutdown_event));

  base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
      weak_factory_.GetWeakPtr()));
  filter_ = new GpuChannelMessageFilter(
      mailbox_manager_->private_key(),
      weak_ptr,
      gpu_channel_manager_->sync_point_manager(),
      base::MessageLoopProxy::current());
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());

  return true;
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
int GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_.get()) {
    NOTREACHED();
    return -1;
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_GetStateFast::ID) {
    if (processed_get_state_fast_) {
      // Require a non-GetStateFast message in between two GetStateFast
      // messages, to ensure progress is made.
      std::deque<IPC::Message*>::iterator point = deferred_messages_.begin();

      while (point != deferred_messages_.end() &&
             (*point)->type() == GpuCommandBufferMsg_GetStateFast::ID) {
        ++point;
      }

      if (point != deferred_messages_.end()) {
        ++point;
      }

      deferred_messages_.insert(point, new IPC::Message(message));
    } else {
      // Move GetStateFast commands to the head of the queue, so the renderer
      // doesn't have to wait any longer than necessary.
      deferred_messages_.push_front(new IPC::Message(message));
    }
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_.get()) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}

void GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  *route_id = MSG_ROUTING_NONE;

#if defined(ENABLE_GPU)

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  *route_id = GenerateRouteID();
  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               image_manager_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               *route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
#endif  // ENABLE_GPU
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::CreateImage(
    gfx::PluginWindowHandle window,
    int32 image_id,
    gfx::Size* size) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateImage",
               "image_id",
               image_id);

  *size = gfx::Size();

  if (image_manager_->LookupImage(image_id)) {
    LOG(ERROR) << "CreateImage failed, image_id already in use.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = gfx::GLImage::CreateGLImage(window);
  if (!image.get())
    return;

  image_manager_->AddImage(image.get(), image_id);
  *size = image->GetSize();
}

void GpuChannel::DeleteImage(int32 image_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::DeleteImage",
               "image_id",
               image_id);

  image_manager_->RemoveImage(image_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

void GpuChannel::DestroySoon() {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
}

int GpuChannel::GenerateRouteID() {
  static int last_id = 0;
  return ++last_id;
}

void GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}
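
// Note that the flag above is created lazily: until GetPreemptionFlag() is
// first called, the IO-thread filter never sees a preempting flag and so
// skips the per-message bookkeeping in its OnMessageReceived() entirely.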

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

GpuChannel::~GpuChannel() {
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateVideoEncoder, OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyVideoEncoder,
                        OnDestroyVideoEncoder)
#if defined(OS_ANDROID)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_RegisterStreamTextureProxy,
                        OnRegisterStreamTextureProxy)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_EstablishStreamTexture,
                        OnEstablishStreamTexture)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_SetStreamTextureSize,
                        OnSetStreamTextureSize)
#endif
    IPC_MESSAGE_HANDLER(
        GpuChannelMsg_CollectRenderingStatsForSurface,
        OnCollectRenderingStatsForSurface)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  bool should_fast_track_ack = false;
  IPC::Message* m = deferred_messages_.front();
  GpuCommandBufferStub* stub = stubs_.Lookup(m->routing_id());

  do {
    if (stub) {
      if (!stub->IsScheduled())
        return;
      if (stub->IsPreempted()) {
        OnScheduled();
        return;
      }
    }

    scoped_ptr<IPC::Message> message(m);
    deferred_messages_.pop_front();
    bool message_processed = true;

    processed_get_state_fast_ =
        (message->type() == GpuCommandBufferMsg_GetStateFast::ID);

    currently_processing_message_ = message.get();
    bool result;
    if (message->routing_id() == MSG_ROUTING_CONTROL)
      result = OnControlMessageReceived(*message);
    else
      result = router_.RouteMessage(*message);
    currently_processing_message_ = NULL;

    if (!result) {
      // Respond to sync messages even if router failed to route.
      if (message->is_sync()) {
        IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
        reply->set_reply_error();
        Send(reply);
      }
    } else {
      // If the command buffer becomes unscheduled as a result of handling the
      // message but still has more commands to process, synthesize an IPC
      // message to flush that command buffer.
      if (stub) {
        if (stub->HasUnprocessedCommands()) {
          deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
              stub->route_id()));
          message_processed = false;
        }
      }
    }
    if (message_processed)
      MessageProcessed();

    // We want the EchoACK following the SwapBuffers to be sent as close as
    // possible, avoiding scheduling other channels in the meantime.
    should_fast_track_ack = false;
    if (!deferred_messages_.empty()) {
      m = deferred_messages_.front();
      stub = stubs_.Lookup(m->routing_id());
      should_fast_track_ack =
          (m->type() == GpuCommandBufferMsg_Echo::ID) &&
          stub && stub->IsScheduled();
    }
  } while (should_fast_track_ack);

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32* route_id) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  *route_id = GenerateRouteID();

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      image_manager_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      *route_id,
      0,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  router_.AddRoute(*route_id, stub.get());
  stubs_.AddWithID(stub.release(), *route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = (stub && !stub->IsScheduled());
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::OnCreateVideoEncoder(int32* route_id) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateVideoEncoder");

  *route_id = GenerateRouteID();
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(this, *route_id);
  router_.AddRoute(*route_id, encoder);
  video_encoders_.AddWithID(encoder, *route_id);
}

void GpuChannel::OnDestroyVideoEncoder(int32 route_id) {
  TRACE_EVENT1(
      "gpu", "GpuChannel::OnDestroyVideoEncoder", "route_id", route_id);
  GpuVideoEncodeAccelerator* encoder = video_encoders_.Lookup(route_id);
  if (!encoder)
    return;
  router_.RemoveRoute(route_id);
  video_encoders_.Remove(route_id);
}

#if defined(OS_ANDROID)
void GpuChannel::OnRegisterStreamTextureProxy(
    int32 stream_id, int32* route_id) {
  // Note that route_id is only used for notifications sent out from here.
  // StreamTextureManager owns all texture objects and for incoming messages
  // it finds the correct object based on stream_id.
  *route_id = GenerateRouteID();
  stream_texture_manager_->RegisterStreamTextureProxy(stream_id, *route_id);
}

void GpuChannel::OnEstablishStreamTexture(
    int32 stream_id, int32 primary_id, int32 secondary_id) {
  stream_texture_manager_->EstablishStreamTexture(
      stream_id, primary_id, secondary_id);
}

void GpuChannel::OnSetStreamTextureSize(
    int32 stream_id, const gfx::Size& size) {
  stream_texture_manager_->SetStreamTextureSize(stream_id, size);
}
#endif  // defined(OS_ANDROID)

void GpuChannel::OnCollectRenderingStatsForSurface(
    int32 surface_id, GpuRenderingStats* stats) {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    int texture_upload_count =
        it.GetCurrentValue()->decoder()->GetTextureUploadCount();
    base::TimeDelta total_texture_upload_time =
        it.GetCurrentValue()->decoder()->GetTotalTextureUploadTime();
    base::TimeDelta total_processing_commands_time =
        it.GetCurrentValue()->decoder()->GetTotalProcessingCommandsTime();

    stats->global_texture_upload_count += texture_upload_count;
    stats->global_total_texture_upload_time += total_texture_upload_time;
    stats->global_total_processing_commands_time +=
        total_processing_commands_time;
    if (it.GetCurrentValue()->surface_id() == surface_id) {
      stats->texture_upload_count += texture_upload_count;
      stats->total_texture_upload_time += total_texture_upload_time;
      stats->total_processing_commands_time += total_processing_commands_time;
    }
  }
}

void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::ChannelProxy::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

}  // namespace content