// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_channel.h"

#include <algorithm>
#include <queue>
#include <set>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/thread_task_runner_handle.h"
#include "base/timer/timer.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image_shared_memory.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsyncs. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
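
// With the values above: a channel becomes eligible for preemption once its
// oldest pending IPC has waited 2 * 17 = 34 ms; preemption is then held for
// at most 17 ms, and is dropped early as soon as the oldest pending IPC is
// less than 17 ms old.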

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO
//   thread.
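//
// The filter runs on the IO thread, while GpuChannel handles messages on the
// main thread; the two sides coordinate only through the forwarded-message
// count (see MessageProcessed below) and the shared preemption flag.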
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(
      base::WeakPtr<GpuChannel> gpu_channel,
      gpu::SyncPointManager* sync_point_manager,
      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
      bool future_sync_points)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        sender_(NULL),
        sync_point_manager_(sync_point_manager),
        task_runner_(task_runner),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        future_sync_points_(future_sync_points) {}

  void OnFilterAdded(IPC::Sender* sender) override {
    DCHECK(!sender_);
    sender_ = sender;
  }

  void OnFilterRemoved() override {
    DCHECK(sender_);
    sender_ = NULL;
  }

  bool OnMessageReceived(const IPC::Message& message) override {
    DCHECK(sender_);

    bool handled = false;
    if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
        !future_sync_points_) {
      DLOG(ERROR) << "Untrusted client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      return true;
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      base::Tuple<bool> retire;
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                              &retire)) {
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      if (!future_sync_points_ && !base::get<0>(retire)) {
        LOG(ERROR) << "Untrusted contexts can't create future sync points";
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
                     gpu_channel_, sync_point_manager_, message.routing_id(),
                     base::get<0>(retire), sync_point));
      handled = true;
    }

    // These are handled by GpuJpegDecodeAccelerator and
    // GpuVideoDecodeAccelerator.
    // TODO(kcwu) Modify GpuChannel::AddFilter to handle additional filters by
    // GpuChannelMessageFilter instead of by IPC::SyncChannel directly. Then we
    // don't need to exclude them one by one here.
    if (message.type() == AcceleratedJpegDecoderMsg_Decode::ID ||
        message.type() == AcceleratedJpegDecoderMsg_Destroy::ID ||
        message.type() == AcceleratedVideoDecoderMsg_Decode::ID) {
      return handled;
    }

    // All other messages get processed by the GpuChannel.
    messages_forwarded_to_channel_++;
    if (preempting_flag_.get())
      pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
    UpdatePreemptionState();

    return handled;
  }
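
  // Called on the IO thread with the count of messages that the main thread
  // has fully handled. Drops every pending entry at or below that sequence
  // number, so preemption timing is always measured from the oldest
  // still-unhandled IPC.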
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return sender_->Send(message);
  }

 protected:
  ~GpuChannelMessageFilter() override {}

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };
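
  // Transitions implemented below:
  //   IDLE -> WAITING                 preempting flag set, IPCs pending
  //   WAITING -> CHECKING             after kPreemptWaitTimeMs (timer)
  //   CHECKING -> PREEMPTING          oldest IPC older than kPreemptWaitTimeMs
  //                                   and no stub descheduled
  //   CHECKING/PREEMPTING -> WOULD_PREEMPT_DESCHEDULED
  //                                   some stub descheduled
  //   PREEMPTING/WOULD_PREEMPT_DESCHEDULED -> IDLE
  //                                   caught up, or max preemption time spent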

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel> gpu_channel,
      gpu::SyncPointManager* manager,
      int32 routing_id,
      bool retire,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel) {
      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        if (retire) {
          GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
          gpu_channel->OnMessageReceived(message);
        }
        return;
      } else {
        gpu_channel->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
  // passed through - therefore the WeakPtr assumptions are respected.
  base::WeakPtr<GpuChannel> gpu_channel_;
  IPC::Sender* sender_;
  gpu::SyncPointManager* sync_point_manager_;
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;
};
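
// Rough usage sketch (illustrative only; the real call site lives in
// GpuChannelManager): a channel is constructed on the main thread and then
// initialized with the IO task runner before any renderer IPC can arrive:
//
//   scoped_ptr<GpuChannel> channel(new GpuChannel(
//       manager, watchdog, share_group, mailbox_manager, client_id,
//       client_tracing_id, false /* software */, allow_future_sync_points));
//   channel->Init(io_task_runner, shutdown_event, attachment_broker);
//
// After Init(), messages arrive first on the IO thread via
// GpuChannelMessageFilter and are then queued for the main thread.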

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       uint64_t client_tracing_id,
                       bool software,
                       bool allow_future_sync_points)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      client_tracing_id_(client_tracing_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox
                           ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox)
                           : gpu::gles2::MailboxManager::Create()),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      currently_processing_message_(NULL),
      num_stubs_descheduled_(0),
      allow_future_sync_points_(allow_future_sync_points),
      weak_factory_(this) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const base::CommandLine* command_line =
      base::CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);

  subscription_ref_set_ = new gpu::gles2::SubscriptionRefSet();
  subscription_ref_set_->AddObserver(this);
}

GpuChannel::~GpuChannel() {
  STLDeleteElements(&deferred_messages_);
  subscription_ref_set_->RemoveObserver(this);
  if (preempting_flag_.get())
    preempting_flag_->Reset();

  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
}

void GpuChannel::Init(base::SingleThreadTaskRunner* io_task_runner,
                      base::WaitableEvent* shutdown_event,
                      IPC::AttachmentBroker* broker) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_ =
      IPC::SyncChannel::Create(channel_id_, IPC::Channel::MODE_SERVER, this,
                               io_task_runner, false, shutdown_event, broker);

  filter_ = new GpuChannelMessageFilter(
      weak_factory_.GetWeakPtr(), gpu_channel_manager_->sync_point_manager(),
      base::ThreadTaskRunnerHandle::Get(), allow_future_sync_points_);
  io_task_runner_ = io_task_runner;
  channel_->AddFilter(filter_.get());
  pending_valuebuffer_state_ = new gpu::ValueStateMap();

  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, base::ThreadTaskRunnerHandle::Get());
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
base::ScopedFD GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return base::ScopedFD();
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    // Move Wait commands to the head of the queue, so the renderer
    // doesn't have to wait any longer than necessary.
    deferred_messages_.push_front(new IPC::Message(message));
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());

  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

void GpuChannel::OnAddSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_AddSubscription(client_id_, target));
}

void GpuChannel::OnRemoveSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_RemoveSubscription(client_id_, target));
}

void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_, a_stub_is_descheduled));
    }
  }
}

CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               subscription_ref_set_.get(),
                               pending_valuebuffer_state_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }
  stubs_.AddWithID(stub.release(), route_id);
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder,
                                    OnCreateJpegDecoder)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  IPC::Message* m = NULL;
  GpuCommandBufferStub* stub = NULL;

  m = deferred_messages_.front();
  stub = stubs_.Lookup(m->routing_id());
  if (stub) {
    if (!stub->IsScheduled())
      return;
    if (stub->IsPreempted()) {
      OnScheduled();
      return;
    }
  }

  scoped_ptr<IPC::Message> message(m);
  deferred_messages_.pop_front();
  bool message_processed = true;

  currently_processing_message_ = message.get();
  bool result;
  if (message->routing_id() == MSG_ROUTING_CONTROL)
    result = OnControlMessageReceived(*message);
  else
    result = router_.RouteMessage(*message);
  currently_processing_message_ = NULL;

  if (!result) {
    // Respond to sync messages even if router failed to route.
    if (message->is_sync()) {
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
      reply->set_reply_error();
      Send(reply);
    }
  } else {
    // If the command buffer becomes unscheduled as a result of handling the
    // message but still has more commands to process, synthesize an IPC
    // message to flush that command buffer.
    if (stub) {
      if (stub->HasUnprocessedCommands()) {
        deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
            stub->route_id()));
        message_processed = false;
      }
    }
  }
  if (message_processed)
    MessageProcessed();

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      subscription_ref_set_.get(),
      pending_valuebuffer_state_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      route_id,
      0,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }
  stubs_.AddWithID(stub.release(), route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = (stub && !stub->IsScheduled());
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) {
  if (!jpeg_decoder_) {
    jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_));
  }
  jpeg_decoder_->AddClient(route_id, reply_msg);
}
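
// Main-thread half of the preemption bookkeeping: each fully handled message
// bumps messages_processed_, and the updated count is posted to the IO-thread
// filter so it can drop the matching entries from its pending-message queue.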
void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_task_runner_->PostTask(
        FROM_HERE, base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                              filter_, messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

uint64 GpuChannel::GetMemoryUsage() {
  // Collect the unique memory trackers in use by the |stubs_|.
  std::set<gpu::gles2::MemoryTracker*> unique_memory_trackers;
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    unique_memory_trackers.insert(it.GetCurrentValue()->GetMemoryTracker());
  }

  // Sum the memory usage for all unique memory trackers.
  uint64 size = 0;
  for (auto* tracker : unique_memory_trackers) {
    size += gpu_channel_manager()->gpu_memory_manager()->GetTrackerMemoryUsage(
        tracker);
  }

  return size;
}

scoped_refptr<gfx::GLImage> GpuChannel::CreateImageForGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    gfx::BufferFormat format,
    uint32 internalformat) {
  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      scoped_refptr<gfx::GLImageSharedMemory> image(
          new gfx::GLImageSharedMemory(size, internalformat));
      if (!image->Initialize(handle, format))
        return scoped_refptr<gfx::GLImage>();

      return image;
    }
    default: {
      GpuChannelManager* manager = gpu_channel_manager();
      if (!manager->gpu_memory_buffer_factory())
        return scoped_refptr<gfx::GLImage>();

      return manager->gpu_memory_buffer_factory()
          ->AsImageFactory()
          ->CreateImageForGpuMemoryBuffer(handle,
                                          size,
                                          format,
                                          internalformat,
                                          client_id_);
    }
  }
}

void GpuChannel::HandleUpdateValueState(
    unsigned int target, const gpu::ValueState& state) {
  pending_valuebuffer_state_->UpdateState(target, state);
}

bool GpuChannel::OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                              base::trace_event::ProcessMemoryDump* pmd) {
  auto dump_name = GetChannelName();
  std::replace(dump_name.begin(), dump_name.end(), '.', '_');

  base::trace_event::MemoryAllocatorDump* dump =
      pmd->CreateAllocatorDump(base::StringPrintf("gl/%s", dump_name.c_str()));

  dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                  base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                  GetMemoryUsage());

  return true;
}

}  // namespace content