1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "content/common/gpu/gpu_channel.h"
15 #include "base/bind.h"
16 #include "base/command_line.h"
17 #include "base/location.h"
18 #include "base/single_thread_task_runner.h"
19 #include "base/stl_util.h"
20 #include "base/strings/string_util.h"
21 #include "base/thread_task_runner_handle.h"
22 #include "base/timer/timer.h"
23 #include "base/trace_event/memory_dump_manager.h"
24 #include "base/trace_event/process_memory_dump.h"
25 #include "base/trace_event/trace_event.h"
26 #include "content/common/gpu/gpu_channel_manager.h"
27 #include "content/common/gpu/gpu_memory_buffer_factory.h"
28 #include "content/common/gpu/gpu_messages.h"
29 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
30 #include "content/public/common/content_switches.h"
31 #include "gpu/command_buffer/common/mailbox.h"
32 #include "gpu/command_buffer/common/value_state.h"
33 #include "gpu/command_buffer/service/gpu_scheduler.h"
34 #include "gpu/command_buffer/service/image_factory.h"
35 #include "gpu/command_buffer/service/mailbox_manager.h"
36 #include "gpu/command_buffer/service/sync_point_manager.h"
37 #include "gpu/command_buffer/service/valuebuffer_manager.h"
38 #include "ipc/ipc_channel.h"
39 #include "ipc/message_filter.h"
40 #include "ui/gl/gl_context.h"
41 #include "ui/gl/gl_image_shared_memory.h"
42 #include "ui/gl/gl_surface.h"
45 #include "ipc/ipc_channel_posix.h"
51 // Number of milliseconds between successive vsync. Many GL commands block
52 // on vsync, so thresholds for preemption should be multiples of this.
53 const int64 kVsyncIntervalMs
= 17;
55 // Amount of time that we will wait for an IPC to be processed before
56 // preempting. After a preemption, we must wait this long before triggering
57 // another preemption.
58 const int64 kPreemptWaitTimeMs
= 2 * kVsyncIntervalMs
;
60 // Once we trigger a preemption, the maximum duration that we will wait
61 // before clearing the preemption.
62 const int64 kMaxPreemptTimeMs
= kVsyncIntervalMs
;
64 // Stop the preemption once the time for the longest pending IPC drops
65 // below this threshold.
66 const int64 kStopPreemptThresholdMs
= kVsyncIntervalMs
;
68 } // anonymous namespace
70 // This filter does three things:
71 // - it counts and timestamps each message forwarded to the channel
72 // so that we can preempt other channels if a message takes too long to
73 // process. To guarantee fairness, we must wait a minimum amount of time
74 // before preempting and we limit the amount of time that we can preempt in
75 // one shot (see constants above).
76 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
77 // thread, generating the sync point ID and responding immediately, and then
78 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
79 // into the channel's queue.
80 // - it generates mailbox names for clients of the GPU process on the IO thread.
// IO-thread message filter for a GpuChannel; see the block comment above for
// its three responsibilities (message counting for preemption, handling
// InsertSyncPoint on the IO thread, mailbox generation).
// NOTE(review): this capture has gaps (original line numbering skips), so the
// visible lines are not contiguous; access specifiers and some initializers
// (e.g. one for |sender_|, original line 90) appear to be elided.
81 class GpuChannelMessageFilter
: public IPC::MessageFilter
{
// Binds the filter to its channel. |gpu_channel| is a WeakPtr because the
// channel lives on the main thread while the filter runs on the IO thread;
// |future_sync_points| gates whether the client may create future sync points.
83 GpuChannelMessageFilter(
84 base::WeakPtr
<GpuChannel
> gpu_channel
,
85 scoped_refptr
<gpu::SyncPointManager
> sync_point_manager
,
86 scoped_refptr
<base::SingleThreadTaskRunner
> task_runner
,
87 bool future_sync_points
)
88 : preemption_state_(IDLE
),
89 gpu_channel_(gpu_channel
),
91 sync_point_manager_(sync_point_manager
),
92 task_runner_(task_runner
),
93 messages_forwarded_to_channel_(0),
94 a_stub_is_descheduled_(false),
95 future_sync_points_(future_sync_points
) {}
// IPC::MessageFilter overrides. Bodies appear elided in this capture;
// presumably OnFilterAdded stores |sender| into |sender_| — TODO confirm.
97 void OnFilterAdded(IPC::Sender
* sender
) override
{
102 void OnFilterRemoved() override
{
// Inspects every message headed for the channel on the IO thread:
// - rejects RetireSyncPoint from untrusted clients,
// - answers InsertSyncPoint immediately (generating the sync point here and
//   posting the retirement work to the main thread),
// - counts and timestamps everything else for the preemption machinery.
// NOTE(review): several lines (returns, closing braces, a ReadSendParam
// argument) are elided in this capture; the statements below are not
// contiguous.
107 bool OnMessageReceived(const IPC::Message
& message
) override
{
110 bool handled
= false;
// Untrusted clients must not retire sync points themselves.
111 if ((message
.type() == GpuCommandBufferMsg_RetireSyncPoint::ID
) &&
112 !future_sync_points_
) {
113 DLOG(ERROR
) << "Untrusted client should not send "
114 "GpuCommandBufferMsg_RetireSyncPoint message";
// InsertSyncPoint is answered synchronously on the IO thread so the client
// is not blocked behind the channel's deferred-message queue.
118 if (message
.type() == GpuCommandBufferMsg_InsertSyncPoint::ID
) {
119 base::Tuple
<bool> retire
;
120 IPC::Message
* reply
= IPC::SyncMessage::GenerateReply(&message
);
121 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message
,
123 reply
->set_reply_error();
// |retire| == false requests a "future" sync point, which only trusted
// channels may create.
127 if (!future_sync_points_
&& !base::get
<0>(retire
)) {
128 LOG(ERROR
) << "Untrusted contexts can't create future sync points";
129 reply
->set_reply_error();
133 uint32 sync_point
= sync_point_manager_
->GenerateSyncPoint();
134 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply
, sync_point
);
// The retirement must happen on the main thread, in queue order.
136 task_runner_
->PostTask(
138 base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread
,
139 gpu_channel_
, sync_point_manager_
, message
.routing_id(),
140 base::get
<0>(retire
), sync_point
));
144 // All other messages get processed by the GpuChannel.
145 messages_forwarded_to_channel_
++;
146 if (preempting_flag_
.get())
147 pending_messages_
.push(PendingMessage(messages_forwarded_to_channel_
));
148 UpdatePreemptionState();
153 void MessageProcessed(uint64 messages_processed
) {
154 while (!pending_messages_
.empty() &&
155 pending_messages_
.front().message_number
<= messages_processed
)
156 pending_messages_
.pop();
157 UpdatePreemptionState();
160 void SetPreemptingFlagAndSchedulingState(
161 gpu::PreemptionFlag
* preempting_flag
,
162 bool a_stub_is_descheduled
) {
163 preempting_flag_
= preempting_flag
;
164 a_stub_is_descheduled_
= a_stub_is_descheduled
;
167 void UpdateStubSchedulingState(bool a_stub_is_descheduled
) {
168 a_stub_is_descheduled_
= a_stub_is_descheduled
;
169 UpdatePreemptionState();
172 bool Send(IPC::Message
* message
) {
173 return sender_
->Send(message
);
177 ~GpuChannelMessageFilter() override
{}
// States of the preemption machine driven by UpdatePreemptionState().
// NOTE(review): the enumerator lines for the first four states (presumably
// IDLE, WAITING, CHECKING, PREEMPTING — they are referenced by name below)
// are elided in this capture; only the last enumerator is visible.
180 enum PreemptionState
{
181 // Either there's no other channel to preempt, there are no messages
182 // pending processing, or we just finished preempting and have to wait
183 // before preempting again.
185 // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
187 // We can preempt whenever any IPC processing takes more than
188 // kPreemptWaitTimeMs.
190 // We are currently preempting (i.e. no stub is descheduled).
192 // We would like to preempt, but some stub is descheduled.
193 WOULD_PREEMPT_DESCHEDULED
,
// Current state of the machine above.
196 PreemptionState preemption_state_
;
198 // Maximum amount of time that we can spend in PREEMPTING.
199 // It is reset when we transition to IDLE.
200 base::TimeDelta max_preemption_time_
;
202 struct PendingMessage
{
203 uint64 message_number
;
204 base::TimeTicks time_received
;
206 explicit PendingMessage(uint64 message_number
)
207 : message_number(message_number
),
208 time_received(base::TimeTicks::Now()) {
// Core of the preemption state machine: advances |preemption_state_| based
// on the pending-message queue, elapsed times, the scheduling state of the
// stubs, and the one-shot |timer_|.
// NOTE(review): this capture is missing many lines — case labels, timer_
// Start()/Stop() calls, break statements and braces — so the visible
// statements are NOT contiguous; treat control flow here as indicative only.
212 void UpdatePreemptionState() {
213 switch (preemption_state_
) {
// (IDLE case) Start waiting as soon as there is something to preempt for.
215 if (preempting_flag_
.get() && !pending_messages_
.empty())
216 TransitionToWaiting();
219 // A timer will transition us to CHECKING.
220 DCHECK(timer_
.IsRunning());
// (CHECKING case) Preempt once the oldest pending IPC has waited too long.
223 if (!pending_messages_
.empty()) {
224 base::TimeDelta time_elapsed
=
225 base::TimeTicks::Now() - pending_messages_
.front().time_received
;
226 if (time_elapsed
.InMilliseconds() < kPreemptWaitTimeMs
) {
227 // Schedule another check for when the IPC may go long.
230 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs
) -
232 this, &GpuChannelMessageFilter::UpdatePreemptionState
);
234 if (a_stub_is_descheduled_
)
235 TransitionToWouldPreemptDescheduled();
237 TransitionToPreempting();
242 // A TransitionToIdle() timer should always be running in this state.
243 DCHECK(timer_
.IsRunning());
244 if (a_stub_is_descheduled_
)
245 TransitionToWouldPreemptDescheduled();
247 TransitionToIdleIfCaughtUp();
249 case WOULD_PREEMPT_DESCHEDULED
:
250 // A TransitionToIdle() timer should never be running in this state.
251 DCHECK(!timer_
.IsRunning());
252 if (!a_stub_is_descheduled_
)
253 TransitionToPreempting();
255 TransitionToIdleIfCaughtUp();
// Leaves PREEMPTING/WOULD_PREEMPT_DESCHEDULED once the queue has drained or
// the oldest pending IPC is younger than the stop threshold.
262 void TransitionToIdleIfCaughtUp() {
263 DCHECK(preemption_state_
== PREEMPTING
||
264 preemption_state_
== WOULD_PREEMPT_DESCHEDULED
);
265 if (pending_messages_
.empty()) {
268 base::TimeDelta time_elapsed
=
269 base::TimeTicks::Now() - pending_messages_
.front().time_received
;
270 if (time_elapsed
.InMilliseconds() < kStopPreemptThresholdMs
)
// Clears the preempting flag and re-enters IDLE.
275 void TransitionToIdle() {
276 DCHECK(preemption_state_
== PREEMPTING
||
277 preemption_state_
== WOULD_PREEMPT_DESCHEDULED
);
278 // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
281 preemption_state_
= IDLE
;
282 preempting_flag_
->Reset();
283 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
285 UpdatePreemptionState();
// Arms the timer that moves us from WAITING to CHECKING after
// kPreemptWaitTimeMs.
288 void TransitionToWaiting() {
289 DCHECK_EQ(preemption_state_
, IDLE
);
290 DCHECK(!timer_
.IsRunning());
292 preemption_state_
= WAITING
;
295 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs
),
296 this, &GpuChannelMessageFilter::TransitionToChecking
);
// Enters CHECKING with a fresh preemption-time budget.
299 void TransitionToChecking() {
300 DCHECK_EQ(preemption_state_
, WAITING
);
301 DCHECK(!timer_
.IsRunning());
303 preemption_state_
= CHECKING
;
304 max_preemption_time_
= base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs
);
305 UpdatePreemptionState();
// Raises the preempting flag and arms the timer that bounds how long we
// may stay in PREEMPTING (|max_preemption_time_|).
308 void TransitionToPreempting() {
309 DCHECK(preemption_state_
== CHECKING
||
310 preemption_state_
== WOULD_PREEMPT_DESCHEDULED
);
311 DCHECK(!a_stub_is_descheduled_
);
313 // Stop any pending state update checks that we may have queued
315 if (preemption_state_
== CHECKING
)
318 preemption_state_
= PREEMPTING
;
319 preempting_flag_
->Set();
320 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);
324 max_preemption_time_
,
325 this, &GpuChannelMessageFilter::TransitionToIdle
);
327 UpdatePreemptionState();
// Stops preempting while a stub is descheduled, banking the remaining
// preemption budget so it can resume later.
330 void TransitionToWouldPreemptDescheduled() {
331 DCHECK(preemption_state_
== CHECKING
||
332 preemption_state_
== PREEMPTING
);
333 DCHECK(a_stub_is_descheduled_
);
335 if (preemption_state_
== CHECKING
) {
336 // Stop any pending state update checks that we may have queued
340 // Stop any TransitionToIdle() timers that we may have queued
// Bank whatever preemption time is left on the running timer.
343 max_preemption_time_
= timer_
.desired_run_time() - base::TimeTicks::Now();
344 if (max_preemption_time_
< base::TimeDelta()) {
350 preemption_state_
= WOULD_PREEMPT_DESCHEDULED
;
351 preempting_flag_
->Reset();
352 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
354 UpdatePreemptionState();
// Main-thread half of InsertSyncPoint handling: attaches the already-
// generated sync point to the target stub, or retires it directly if the
// channel/stub is gone.
// NOTE(review): parameter lines (routing id, retire flag, sync point) and
// several braces/else-branches are elided in this capture.
357 static void InsertSyncPointOnMainThread(
358 base::WeakPtr
<GpuChannel
> gpu_channel
,
359 scoped_refptr
<gpu::SyncPointManager
> manager
,
363 // This function must ensure that the sync point will be retired. Normally
364 // we'll find the stub based on the routing ID, and associate the sync point
365 // with it, but if that fails for any reason (channel or stub already
366 // deleted, invalid routing id), we need to retire the sync point
369 GpuCommandBufferStub
* stub
= gpu_channel
->LookupCommandBuffer(routing_id
);
371 stub
->AddSyncPoint(sync_point
);
// Feed a RetireSyncPoint message through the channel so it is processed in
// order with the rest of the queue.
373 GpuCommandBufferMsg_RetireSyncPoint
message(routing_id
, sync_point
);
374 gpu_channel
->OnMessageReceived(message
);
378 gpu_channel
->MessageProcessed();
// Fallback: retire directly when the stub could not be found.
381 manager
->RetireSyncPoint(sync_point
);
384 // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
385 // passed through - therefore the WeakPtr assumptions are respected.
386 base::WeakPtr
<GpuChannel
> gpu_channel_
;
// Sender supplied via OnFilterAdded; non-owning.
387 IPC::Sender
* sender_
;
388 scoped_refptr
<gpu::SyncPointManager
> sync_point_manager_
;
// Task runner used to post work back to the channel's (main) thread.
389 scoped_refptr
<base::SingleThreadTaskRunner
> task_runner_
;
// Flag raised while this channel preempts others; may be null until
// SetPreemptingFlagAndSchedulingState() is called.
390 scoped_refptr
<gpu::PreemptionFlag
> preempting_flag_
;
// Messages forwarded to the channel but not yet reported processed.
392 std::queue
<PendingMessage
> pending_messages_
;
394 // Count of the number of IPCs forwarded to the GpuChannel.
395 uint64 messages_forwarded_to_channel_
;
// One-shot timer driving the WAITING->CHECKING and PREEMPTING->IDLE moves.
397 base::OneShotTimer
<GpuChannelMessageFilter
> timer_
;
399 bool a_stub_is_descheduled_
;
401 // True if this channel can create future sync points.
402 bool future_sync_points_
;
// Constructs a channel for one client of the GPU process. A share group and
// mailbox manager are created if the caller did not supply them.
// NOTE(review): several parameter lines (e.g. client_id, software flag) and
// initializers are elided in this capture.
405 GpuChannel::GpuChannel(GpuChannelManager
* gpu_channel_manager
,
406 GpuWatchdog
* watchdog
,
407 gfx::GLShareGroup
* share_group
,
408 gpu::gles2::MailboxManager
* mailbox
,
411 bool allow_future_sync_points
)
412 : gpu_channel_manager_(gpu_channel_manager
),
413 messages_processed_(0),
414 client_id_(client_id
),
// Fall back to a fresh share group / mailbox manager when none was given.
415 share_group_(share_group
? share_group
: new gfx::GLShareGroup
),
416 mailbox_manager_(mailbox
417 ? scoped_refptr
<gpu::gles2::MailboxManager
>(mailbox
)
418 : gpu::gles2::MailboxManager::Create()),
421 handle_messages_scheduled_(false),
422 currently_processing_message_(NULL
),
423 num_stubs_descheduled_(0),
424 allow_future_sync_points_(allow_future_sync_points
),
425 weak_factory_(this) {
426 DCHECK(gpu_channel_manager
);
// The channel id doubles as the IPC channel name; see GetChannelName().
429 channel_id_
= IPC::Channel::GenerateVerifiedChannelID("gpu");
430 const base::CommandLine
* command_line
=
431 base::CommandLine::ForCurrentProcess();
432 log_messages_
= command_line
->HasSwitch(switches::kLogPluginMessages
);
434 subscription_ref_set_
= new gpu::gles2::SubscriptionRefSet();
435 subscription_ref_set_
->AddObserver(this);
// Tears the channel down: frees undelivered deferred messages, detaches the
// subscription observer, clears any active preemption, and unregisters the
// memory-dump provider (argument line elided in this capture).
438 GpuChannel::~GpuChannel() {
439 STLDeleteElements(&deferred_messages_
);
440 subscription_ref_set_
->RemoveObserver(this);
// Make sure other channels are no longer being preempted by us.
441 if (preempting_flag_
.get())
442 preempting_flag_
->Reset();
444 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
// Creates the underlying IPC::SyncChannel (server side) and installs the
// IO-thread GpuChannelMessageFilter. Must be called exactly once, before any
// messages arrive (DCHECK below).
// NOTE(review): the assignment receiving IPC::SyncChannel::Create()'s result
// (presumably into |channel_|) is elided in this capture.
448 void GpuChannel::Init(base::SingleThreadTaskRunner
* io_task_runner
,
449 base::WaitableEvent
* shutdown_event
,
450 IPC::AttachmentBroker
* broker
) {
451 DCHECK(!channel_
.get());
453 // Map renderer ID to a (single) channel to that process.
455 IPC::SyncChannel::Create(channel_id_
, IPC::Channel::MODE_SERVER
, this,
456 io_task_runner
, false, shutdown_event
, broker
);
// The filter gets a WeakPtr: it runs on the IO thread and must not outlive
// dereference this channel there.
458 filter_
= new GpuChannelMessageFilter(
459 weak_factory_
.GetWeakPtr(), gpu_channel_manager_
->sync_point_manager(),
460 base::ThreadTaskRunnerHandle::Get(), allow_future_sync_points_
);
461 io_task_runner_
= io_task_runner
;
462 channel_
->AddFilter(filter_
.get());
463 pending_valuebuffer_state_
= new gpu::ValueStateMap();
// Register for memory dumps on the current (main) thread.
465 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
466 this, base::ThreadTaskRunnerHandle::Get());
// Returns the channel's name (body elided in this capture; presumably
// |channel_id_| — TODO confirm).
469 std::string
GpuChannel::GetChannelName() {
473 #if defined(OS_POSIX)
// Releases the client end of the channel's file descriptor to the caller.
// NOTE(review): the early-exit branch before the first return is elided.
474 base::ScopedFD
GpuChannel::TakeRendererFileDescriptor() {
477 return base::ScopedFD();
479 return channel_
->TakeClientFileDescriptor();
481 #endif  // defined(OS_POSIX)
// Queues every incoming message for deferred handling by HandleMessage();
// Wait* messages jump the queue so the renderer is unblocked as soon as
// possible. NOTE(review): the logging guard, the else branch, and the
// scheduling/return tail of this function are elided in this capture.
483 bool GpuChannel::OnMessageReceived(const IPC::Message
& message
) {
485 DVLOG(1) << "received message @" << &message
<< " on channel @" << this
486 << " with type " << message
.type();
489 if (message
.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID
||
490 message
.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID
) {
491 // Move Wait commands to the head of the queue, so the renderer
492 // doesn't have to wait any longer than necessary.
493 deferred_messages_
.push_front(new IPC::Message(message
));
495 deferred_messages_
.push_back(new IPC::Message(message
));
503 void GpuChannel::OnChannelError() {
504 gpu_channel_manager_
->RemoveChannel(client_id_
);
// Sends |message| to the client over the IPC channel; takes ownership of
// |message|. NOTE(review): the null-channel guard between the DVLOG and the
// final return is elided in this capture.
507 bool GpuChannel::Send(IPC::Message
* message
) {
508 // The GPU process must never send a synchronous IPC message to the renderer
509 // process. This could result in deadlock.
510 DCHECK(!message
->is_sync());
512 DVLOG(1) << "sending message @" << message
<< " on channel @" << this
513 << " with type " << message
->type();
521 return channel_
->Send(message
);
524 void GpuChannel::OnAddSubscription(unsigned int target
) {
525 gpu_channel_manager()->Send(
526 new GpuHostMsg_AddSubscription(client_id_
, target
));
529 void GpuChannel::OnRemoveSubscription(unsigned int target
) {
530 gpu_channel_manager()->Send(
531 new GpuHostMsg_RemoveSubscription(client_id_
, target
));
534 void GpuChannel::RequeueMessage() {
535 DCHECK(currently_processing_message_
);
536 deferred_messages_
.push_front(
537 new IPC::Message(*currently_processing_message_
));
538 messages_processed_
--;
539 currently_processing_message_
= NULL
;
// Schedules a HandleMessage() task if one is not already pending.
// NOTE(review): the early return after the guard and the FROM_HERE argument
// line are elided in this capture.
542 void GpuChannel::OnScheduled() {
543 if (handle_messages_scheduled_
)
545 // Post a task to handle any deferred messages. The deferred message queue is
546 // not emptied here, which ensures that OnMessageReceived will continue to
547 // defer newly received messages until the ones in the queue have all been
548 // handled by HandleMessage. HandleMessage is invoked as a
549 // task to prevent reentrancy.
550 base::ThreadTaskRunnerHandle::Get()->PostTask(
552 base::Bind(&GpuChannel::HandleMessage
, weak_factory_
.GetWeakPtr()));
553 handle_messages_scheduled_
= true;
// Keeps |num_stubs_descheduled_| in sync as stubs (de)schedule, and tells
// the IO-thread filter whenever the aggregate "any stub descheduled" bit
// flips. NOTE(review): the if/else around the increment/decrement and the
// FROM_HERE argument are elided in this capture.
556 void GpuChannel::StubSchedulingChanged(bool scheduled
) {
557 bool a_stub_was_descheduled
= num_stubs_descheduled_
> 0;
559 num_stubs_descheduled_
--;
562 num_stubs_descheduled_
++;
564 DCHECK_LE(num_stubs_descheduled_
, stubs_
.size());
565 bool a_stub_is_descheduled
= num_stubs_descheduled_
> 0;
567 if (a_stub_is_descheduled
!= a_stub_was_descheduled
) {
568 if (preempting_flag_
.get()) {
569 io_task_runner_
->PostTask(
571 base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState
,
572 filter_
, a_stub_is_descheduled
));
// Creates a view-backed command buffer stub and registers it under
// |route_id|. Returns a CreateCommandBufferResult so the caller can tell a
// recoverable failure from a lost channel.
// NOTE(review): several parameters (surface id, route_id), stub-constructor
// arguments, and the TRACE_EVENT/#endif lines are elided in this capture.
577 CreateCommandBufferResult
GpuChannel::CreateViewCommandBuffer(
578 const gfx::GLSurfaceHandle
& window
,
580 const GPUCreateCommandBufferConfig
& init_params
,
583 "GpuChannel::CreateViewCommandBuffer",
// Contexts may share a group with an existing stub on this channel.
587 GpuCommandBufferStub
* share_group
= stubs_
.Lookup(init_params
.share_group_id
);
589 // Virtualize compositor contexts on OS X to prevent performance regressions
590 // when enabling FCM.
591 // http://crbug.com/180463
592 bool use_virtualized_gl_context
= false;
593 #if defined(OS_MACOSX)
594 use_virtualized_gl_context
= true;
597 scoped_ptr
<GpuCommandBufferStub
> stub(
598 new GpuCommandBufferStub(this,
601 mailbox_manager_
.get(),
602 subscription_ref_set_
.get(),
603 pending_valuebuffer_state_
.get(),
605 disallowed_features_
,
607 init_params
.gpu_preference
,
608 use_virtualized_gl_context
,
613 init_params
.active_url
));
// New stubs inherit any preemption flag already set on the channel.
614 if (preempted_flag_
.get())
615 stub
->SetPreemptByFlag(preempted_flag_
);
616 if (!router_
.AddRoute(route_id
, stub
.get())) {
617 DLOG(ERROR
) << "GpuChannel::CreateViewCommandBuffer(): "
618 "failed to add route";
619 return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST
;
621 stubs_
.AddWithID(stub
.release(), route_id
);
622 return CREATE_COMMAND_BUFFER_SUCCEEDED
;
625 GpuCommandBufferStub
* GpuChannel::LookupCommandBuffer(int32 route_id
) {
626 return stubs_
.Lookup(route_id
);
629 void GpuChannel::LoseAllContexts() {
630 gpu_channel_manager_
->LoseAllContexts();
633 void GpuChannel::MarkAllContextsLost() {
634 for (StubMap::Iterator
<GpuCommandBufferStub
> it(&stubs_
);
635 !it
.IsAtEnd(); it
.Advance()) {
636 it
.GetCurrentValue()->MarkContextLost();
640 bool GpuChannel::AddRoute(int32 route_id
, IPC::Listener
* listener
) {
641 return router_
.AddRoute(route_id
, listener
);
644 void GpuChannel::RemoveRoute(int32 route_id
) {
645 router_
.RemoveRoute(route_id
);
// Lazily creates the flag this channel raises to preempt others and hands
// it to the IO-thread filter along with the current scheduling state.
// NOTE(review): the FROM_HERE/base::Bind( lines of the PostTask call are
// elided in this capture.
648 gpu::PreemptionFlag
* GpuChannel::GetPreemptionFlag() {
649 if (!preempting_flag_
.get()) {
650 preempting_flag_
= new gpu::PreemptionFlag
;
651 io_task_runner_
->PostTask(
654 &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState
,
655 filter_
, preempting_flag_
, num_stubs_descheduled_
> 0));
657 return preempting_flag_
.get();
// Stores the flag by which this channel can itself be preempted and pushes
// it to every existing stub (new stubs pick it up at creation).
660 void GpuChannel::SetPreemptByFlag(
661 scoped_refptr
<gpu::PreemptionFlag
> preempted_flag
) {
662 preempted_flag_
= preempted_flag
;
664 for (StubMap::Iterator
<GpuCommandBufferStub
> it(&stubs_
);
665 !it
.IsAtEnd(); it
.Advance()) {
666 it
.GetCurrentValue()->SetPreemptByFlag(preempted_flag_
);
670 void GpuChannel::OnDestroy() {
671 TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
672 gpu_channel_manager_
->RemoveChannel(client_id_
);
// Dispatches control (non-routed) messages to their handlers via the IPC
// message map. NOTE(review): the |handled| declaration, the
// OnCreateJpegDecoder handler line, and the return are elided in this
// capture.
675 bool GpuChannel::OnControlMessageReceived(const IPC::Message
& msg
) {
677 IPC_BEGIN_MESSAGE_MAP(GpuChannel
, msg
)
678 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer
,
679 OnCreateOffscreenCommandBuffer
)
680 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer
,
681 OnDestroyCommandBuffer
)
682 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder
,
684 IPC_MESSAGE_UNHANDLED(handled
= false)
685 IPC_END_MESSAGE_MAP()
// Every control message is expected to have a handler.
686 DCHECK(handled
) << msg
.type();
// Drains one message from the deferred queue: skips work if the target stub
// is unscheduled/preempted, routes the message, error-replies to sync
// messages that failed to route, and re-queues a synthetic Rescheduled
// message if the stub still has unprocessed commands.
// NOTE(review): this capture elides many lines (early returns, the |result|
// declaration, braces, the tail that calls MessageProcessed()/OnScheduled());
// the visible statements are not contiguous.
690 void GpuChannel::HandleMessage() {
691 handle_messages_scheduled_
= false;
692 if (deferred_messages_
.empty())
695 IPC::Message
* m
= NULL
;
696 GpuCommandBufferStub
* stub
= NULL
;
698 m
= deferred_messages_
.front();
699 stub
= stubs_
.Lookup(m
->routing_id());
// Leave the message queued while its stub cannot make progress.
701 if (!stub
->IsScheduled())
703 if (stub
->IsPreempted()) {
// From here on |message| owns the popped IPC.
709 scoped_ptr
<IPC::Message
> message(m
);
710 deferred_messages_
.pop_front();
711 bool message_processed
= true;
713 currently_processing_message_
= message
.get();
715 if (message
->routing_id() == MSG_ROUTING_CONTROL
)
716 result
= OnControlMessageReceived(*message
);
718 result
= router_
.RouteMessage(*message
);
719 currently_processing_message_
= NULL
;
722 // Respond to sync messages even if router failed to route.
723 if (message
->is_sync()) {
724 IPC::Message
* reply
= IPC::SyncMessage::GenerateReply(&*message
);
725 reply
->set_reply_error();
729 // If the command buffer becomes unscheduled as a result of handling the
730 // message but still has more commands to process, synthesize an IPC
731 // message to flush that command buffer.
733 if (stub
->HasUnprocessedCommands()) {
734 deferred_messages_
.push_front(new GpuCommandBufferMsg_Rescheduled(
736 message_processed
= false;
740 if (message_processed
)
743 if (!deferred_messages_
.empty()) {
// Creates an offscreen (surfaceless) command-buffer stub and registers it
// under |route_id|. NOTE(review): parameters (route_id, succeeded out-param)
// and several stub-constructor argument lines are elided in this capture.
748 void GpuChannel::OnCreateOffscreenCommandBuffer(
749 const gfx::Size
& size
,
750 const GPUCreateCommandBufferConfig
& init_params
,
753 TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
754 GpuCommandBufferStub
* share_group
= stubs_
.Lookup(init_params
.share_group_id
);
// An empty GLSurfaceHandle marks the stub as offscreen.
756 scoped_ptr
<GpuCommandBufferStub
> stub(new GpuCommandBufferStub(
759 gfx::GLSurfaceHandle(),
760 mailbox_manager_
.get(),
761 subscription_ref_set_
.get(),
762 pending_valuebuffer_state_
.get(),
764 disallowed_features_
,
766 init_params
.gpu_preference
,
772 init_params
.active_url
));
773 if (preempted_flag_
.get())
774 stub
->SetPreemptByFlag(preempted_flag_
);
775 if (!router_
.AddRoute(route_id
, stub
.get())) {
776 DLOG(ERROR
) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
777 "failed to add route";
781 stubs_
.AddWithID(stub
.release(), route_id
);
782 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
783 "route_id", route_id
);
// Destroys the stub registered under |route_id| and, if the renderer might
// be blocked on it, reschedules the channel. NOTE(review): the early return
// for a missing stub is elided in this capture.
787 void GpuChannel::OnDestroyCommandBuffer(int32 route_id
) {
788 TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
789 "route_id", route_id
);
791 GpuCommandBufferStub
* stub
= stubs_
.Lookup(route_id
);
794 bool need_reschedule
= (stub
&& !stub
->IsScheduled());
795 router_
.RemoveRoute(route_id
);
796 stubs_
.Remove(route_id
);
797 // In case the renderer is currently blocked waiting for a sync reply from the
798 // stub, we need to make sure to reschedule the GpuChannel here.
799 if (need_reschedule
) {
800 // This stub won't get a chance to reschedule, so update the count now.
801 StubSchedulingChanged(true);
805 void GpuChannel::OnCreateJpegDecoder(int32 route_id
, IPC::Message
* reply_msg
) {
806 if (!jpeg_decoder_
) {
807 jpeg_decoder_
.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_
));
809 jpeg_decoder_
->AddClient(route_id
, reply_msg
);
812 void GpuChannel::MessageProcessed() {
813 messages_processed_
++;
814 if (preempting_flag_
.get()) {
815 io_task_runner_
->PostTask(
816 FROM_HERE
, base::Bind(&GpuChannelMessageFilter::MessageProcessed
,
817 filter_
, messages_processed_
));
821 void GpuChannel::CacheShader(const std::string
& key
,
822 const std::string
& shader
) {
823 gpu_channel_manager_
->Send(
824 new GpuHostMsg_CacheShader(client_id_
, key
, shader
));
827 void GpuChannel::AddFilter(IPC::MessageFilter
* filter
) {
828 channel_
->AddFilter(filter
);
831 void GpuChannel::RemoveFilter(IPC::MessageFilter
* filter
) {
832 channel_
->RemoveFilter(filter
);
// Sums the memory usage reported by every stub on this channel.
// NOTE(review): the accumulator declaration and the final return are elided
// in this capture.
835 uint64
GpuChannel::GetMemoryUsage() {
837 for (StubMap::Iterator
<GpuCommandBufferStub
> it(&stubs_
);
838 !it
.IsAtEnd(); it
.Advance()) {
839 size
+= it
.GetCurrentValue()->GetMemoryUsage();
// Wraps a GpuMemoryBuffer handle in a GLImage. Shared-memory buffers are
// handled inline; every other handle type is delegated to the channel
// manager's GpuMemoryBufferFactory. An empty scoped_refptr signals failure.
// NOTE(review): the success return of the shared-memory case, the default
// case label, and trailing factory-call arguments are elided in this capture.
844 scoped_refptr
<gfx::GLImage
> GpuChannel::CreateImageForGpuMemoryBuffer(
845 const gfx::GpuMemoryBufferHandle
& handle
,
846 const gfx::Size
& size
,
847 gfx::GpuMemoryBuffer::Format format
,
848 uint32 internalformat
) {
849 switch (handle
.type
) {
850 case gfx::SHARED_MEMORY_BUFFER
: {
851 scoped_refptr
<gfx::GLImageSharedMemory
> image(
852 new gfx::GLImageSharedMemory(size
, internalformat
));
853 if (!image
->Initialize(handle
, format
))
854 return scoped_refptr
<gfx::GLImage
>();
859 GpuChannelManager
* manager
= gpu_channel_manager();
860 if (!manager
->gpu_memory_buffer_factory())
861 return scoped_refptr
<gfx::GLImage
>();
863 return manager
->gpu_memory_buffer_factory()
865 ->CreateImageForGpuMemoryBuffer(handle
,
874 void GpuChannel::HandleUpdateValueState(
875 unsigned int target
, const gpu::ValueState
& state
) {
876 pending_valuebuffer_state_
->UpdateState(target
, state
);
// MemoryDumpProvider implementation: emits a "gl/<channel-name>" allocator
// dump sized by GetMemoryUsage(). Dots are replaced so the channel name does
// not collide with the dump-path separator.
// NOTE(review): the size value argument of AddScalar and the return are
// elided in this capture.
879 bool GpuChannel::OnMemoryDump(base::trace_event::ProcessMemoryDump
* pmd
) {
880 auto dump_name
= GetChannelName();
881 std::replace(dump_name
.begin(), dump_name
.end(), '.', '_');
883 base::trace_event::MemoryAllocatorDump
* dump
=
884 pmd
->CreateAllocatorDump(base::StringPrintf("gl/%s", dump_name
.c_str()));
886 dump
->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize
,
887 base::trace_event::MemoryAllocatorDump::kUnitsBytes
,
893 } // namespace content