// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/common/gpu/gpu_channel.h"

#include "base/atomicops.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/synchronization/lock.h"
#include "base/thread_task_runner_handle.h"
#include "base/timer/timer.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image_shared_memory.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

namespace content {
namespace {
// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
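
// For illustration: with the values above, preemption is considered once the
// oldest pending IPC has waited ~34 ms (two vsync intervals), is held for at
// most ~17 ms once triggered, and is released early as soon as the oldest
// pending IPC is under ~17 ms old.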
}  // anonymous namespace
// Begin order numbers at 1 so 0 can mean no orders.
uint32_t GpuChannelMessageQueue::global_order_counter_ = 1;
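
// Note that the counter is static, so order numbers increase monotonically
// across every channel's queue in this GPU process (all pushes happen on the
// IO thread), giving a process-wide ordering of incoming messages.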
scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create(
    const base::WeakPtr<GpuChannel>& gpu_channel,
    base::SingleThreadTaskRunner* task_runner) {
  return new GpuChannelMessageQueue(gpu_channel, task_runner);
}
GpuChannelMessageQueue::GpuChannelMessageQueue(
    const base::WeakPtr<GpuChannel>& gpu_channel,
    base::SingleThreadTaskRunner* task_runner)
    : enabled_(true),
      unprocessed_order_num_(0),
      processed_order_num_(0),
      gpu_channel_(gpu_channel),
      task_runner_(task_runner) {}
GpuChannelMessageQueue::~GpuChannelMessageQueue() {
  DCHECK(channel_messages_.empty());
}
uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const {
  base::AutoLock auto_lock(channel_messages_lock_);
  return unprocessed_order_num_;
}
void GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) {
  base::AutoLock auto_lock(channel_messages_lock_);
  if (enabled_)
    PushMessageHelper(make_scoped_ptr(new GpuChannelMessage(message)));
}
bool GpuChannelMessageQueue::GenerateSyncPointMessage(
    gpu::SyncPointManager* sync_point_manager,
    const IPC::Message& message,
    bool retire_sync_point,
    uint32_t* sync_point) {
  DCHECK_EQ((uint32_t)GpuCommandBufferMsg_InsertSyncPoint::ID, message.type());
  DCHECK(sync_point);
  base::AutoLock auto_lock(channel_messages_lock_);
  if (enabled_) {
    *sync_point = sync_point_manager->GenerateSyncPoint();

    scoped_ptr<GpuChannelMessage> msg(new GpuChannelMessage(message));
    msg->retire_sync_point = retire_sync_point;
    msg->sync_point = *sync_point;

    PushMessageHelper(msg.Pass());
    return true;
  }
  return false;
}
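
// GenerateSyncPointMessage() runs on the IO thread (see
// GpuChannelMessageFilter::OnMessageReceived below). Creating the sync point
// and enqueuing the message under a single lock hold keeps the pair atomic
// with respect to DeleteAndDisableMessages(), so a sync point is never
// created for a queue that is being torn down and then left unretired.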
bool GpuChannelMessageQueue::HasQueuedMessages() const {
  base::AutoLock auto_lock(channel_messages_lock_);
  return !channel_messages_.empty();
}
base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const {
  base::AutoLock auto_lock(channel_messages_lock_);
  if (!channel_messages_.empty())
    return channel_messages_.front()->time_received;
  return base::TimeTicks();
}
GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const {
  base::AutoLock auto_lock(channel_messages_lock_);
  if (!channel_messages_.empty()) {
    DCHECK_GT(channel_messages_.front()->order_number, processed_order_num_);
    DCHECK_LE(channel_messages_.front()->order_number, unprocessed_order_num_);
    return channel_messages_.front();
  }
  return nullptr;
}
bool GpuChannelMessageQueue::MessageProcessed() {
  base::AutoLock auto_lock(channel_messages_lock_);
  DCHECK(!channel_messages_.empty());
  scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
  channel_messages_.pop_front();
  processed_order_num_ = msg->order_number;
  return !channel_messages_.empty();
}
void GpuChannelMessageQueue::DeleteAndDisableMessages(
    GpuChannelManager* gpu_channel_manager) {
  {
    base::AutoLock auto_lock(channel_messages_lock_);
    DCHECK(enabled_);
    enabled_ = false;
  }

  // We guarantee that the queues will no longer be modified after enabled_
  // is set to false, so it is now safe to modify the queue without the lock.
  // All public facing modifying functions check enabled_ while all
  // private modifying functions DCHECK(enabled_) to enforce this.
  while (!channel_messages_.empty()) {
    scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
    channel_messages_.pop_front();
    // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and
    // GpuCommandBufferMsg_RetireSyncPoint messages; it is safer to just check
    // whether we have a sync point number here.
    if (msg->sync_point) {
      gpu_channel_manager->sync_point_manager()->RetireSyncPoint(
          msg->sync_point);
    }
  }
}
void GpuChannelMessageQueue::ScheduleHandleMessage() {
  task_runner_->PostTask(FROM_HERE,
                         base::Bind(&GpuChannel::HandleMessage, gpu_channel_));
}
void GpuChannelMessageQueue::PushMessageHelper(
    scoped_ptr<GpuChannelMessage> msg) {
  channel_messages_lock_.AssertAcquired();
  DCHECK(enabled_);

  msg->order_number = global_order_counter_++;
  msg->time_received = base::TimeTicks::Now();

  unprocessed_order_num_ = msg->order_number;

  bool had_messages = !channel_messages_.empty();
  channel_messages_.push_back(msg.release());
  if (!had_messages)
    ScheduleHandleMessage();
}
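
// Note that a HandleMessage() task is only posted here on the
// empty -> non-empty transition; while messages remain queued,
// GpuChannel::HandleMessage() re-posts itself via MessageProcessed(), so each
// message gets its own main-thread task.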
GpuChannelMessageFilter::GpuChannelMessageFilter(
    const base::WeakPtr<GpuChannel>& gpu_channel,
    GpuChannelMessageQueue* message_queue,
    gpu::SyncPointManager* sync_point_manager,
    base::SingleThreadTaskRunner* task_runner,
    bool future_sync_points)
    : preemption_state_(IDLE),
      gpu_channel_(gpu_channel),
      message_queue_(message_queue),
      sender_(nullptr),
      peer_pid_(base::kNullProcessId),
      sync_point_manager_(sync_point_manager),
      task_runner_(task_runner),
      a_stub_is_descheduled_(false),
      future_sync_points_(future_sync_points) {}
GpuChannelMessageFilter::~GpuChannelMessageFilter() {}
void GpuChannelMessageFilter::OnFilterAdded(IPC::Sender* sender) {
  DCHECK(!sender_);
  sender_ = sender;
  timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>);
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnFilterAdded(sender_);
  }
}
void GpuChannelMessageFilter::OnFilterRemoved() {
  DCHECK(sender_);
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnFilterRemoved();
  }
  sender_ = nullptr;
  peer_pid_ = base::kNullProcessId;
}
void GpuChannelMessageFilter::OnChannelConnected(int32 peer_pid) {
  DCHECK(peer_pid_ == base::kNullProcessId);
  peer_pid_ = peer_pid;
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnChannelConnected(peer_pid);
  }
}
void GpuChannelMessageFilter::OnChannelError() {
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnChannelError();
  }
}
void GpuChannelMessageFilter::OnChannelClosing() {
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnChannelClosing();
  }
}
void GpuChannelMessageFilter::AddChannelFilter(
    scoped_refptr<IPC::MessageFilter> filter) {
  channel_filters_.push_back(filter);
  if (sender_)
    filter->OnFilterAdded(sender_);
  if (peer_pid_ != base::kNullProcessId)
    filter->OnChannelConnected(peer_pid_);
}
void GpuChannelMessageFilter::RemoveChannelFilter(
    scoped_refptr<IPC::MessageFilter> filter) {
  if (sender_)
    filter->OnFilterRemoved();
  channel_filters_.erase(
      std::find(channel_filters_.begin(), channel_filters_.end(), filter));
}
bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
  if (message.should_unblock() || message.is_reply()) {
    DLOG(ERROR) << "Unexpected message type";
    return true;
  }

  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    if (filter->OnMessageReceived(message)) {
      return true;
    }
  }

  bool handled = false;
  if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
      !future_sync_points_) {
    DLOG(ERROR) << "Untrusted client should not send "
                   "GpuCommandBufferMsg_RetireSyncPoint message";
    return true;
  }

  if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
    base::Tuple<bool> params;
    IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
    if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                            &params)) {
      reply->set_reply_error();
      Send(reply);
      return true;
    }
    bool retire_sync_point = base::get<0>(params);
    if (!future_sync_points_ && !retire_sync_point) {
      DLOG(ERROR) << "Untrusted contexts can't create future sync points";
      reply->set_reply_error();
      Send(reply);
      return true;
    }

    // Message queue must handle the entire sync point generation because the
    // message queue could be disabled from the main thread during generation.
    uint32_t sync_point = 0u;
    if (!message_queue_->GenerateSyncPointMessage(
            sync_point_manager_, message, retire_sync_point, &sync_point)) {
      DLOG(ERROR) << "GpuChannel has been destroyed.";
      reply->set_reply_error();
      Send(reply);
      return true;
    }

    DCHECK_NE(sync_point, 0u);
    GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
    Send(reply);
    handled = true;
  }

  // Forward all other messages to the GPU Channel.
  if (!handled) {
    if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
        message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
      task_runner_->PostTask(FROM_HERE,
                             base::Bind(&GpuChannel::HandleOutOfOrderMessage,
                                        gpu_channel_, message));
    } else {
      message_queue_->PushBackMessage(message);
    }
    handled = true;
  }

  UpdatePreemptionState();
  return handled;
}
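
// Summary of the IO-thread routing above: InsertSyncPoint is answered
// immediately (the reply carries the freshly generated sync point) while the
// actual retirement work is queued; WaitForTokenInRange and
// WaitForGetOffsetInRange bypass the queue via HandleOutOfOrderMessage; every
// other message is appended in arrival order. UpdatePreemptionState() runs
// after each message so the preemption state machine tracks queue depth and
// message age.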
void GpuChannelMessageFilter::OnMessageProcessed() {
  UpdatePreemptionState();
}
void GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState(
    gpu::PreemptionFlag* preempting_flag,
    bool a_stub_is_descheduled) {
  preempting_flag_ = preempting_flag;
  a_stub_is_descheduled_ = a_stub_is_descheduled;
}
void GpuChannelMessageFilter::UpdateStubSchedulingState(
    bool a_stub_is_descheduled) {
  a_stub_is_descheduled_ = a_stub_is_descheduled;
  UpdatePreemptionState();
}
bool GpuChannelMessageFilter::Send(IPC::Message* message) {
  return sender_->Send(message);
}
void GpuChannelMessageFilter::UpdatePreemptionState() {
  switch (preemption_state_) {
    case IDLE:
      if (preempting_flag_.get() && message_queue_->HasQueuedMessages())
        TransitionToWaiting();
      break;
    case WAITING:
      // A timer will transition us to CHECKING.
      DCHECK(timer_->IsRunning());
      break;
    case CHECKING: {
      base::TimeTicks time_tick = message_queue_->GetNextMessageTimeTick();
      if (!time_tick.is_null()) {
        base::TimeDelta time_elapsed = base::TimeTicks::Now() - time_tick;
        if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
          // Schedule another check for when the IPC may go long.
          timer_->Start(FROM_HERE,
                        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                            time_elapsed,
                        this, &GpuChannelMessageFilter::UpdatePreemptionState);
        } else {
          if (a_stub_is_descheduled_)
            TransitionToWouldPreemptDescheduled();
          else
            TransitionToPreempting();
        }
      }
      break;
    }
    case PREEMPTING:
      // A TransitionToIdle() timer should always be running in this state.
      DCHECK(timer_->IsRunning());
      if (a_stub_is_descheduled_)
        TransitionToWouldPreemptDescheduled();
      else
        TransitionToIdleIfCaughtUp();
      break;
    case WOULD_PREEMPT_DESCHEDULED:
      // A TransitionToIdle() timer should never be running in this state.
      DCHECK(!timer_->IsRunning());
      if (!a_stub_is_descheduled_)
        TransitionToPreempting();
      else
        TransitionToIdleIfCaughtUp();
      break;
    default:
      NOTREACHED();
  }
}
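
// Informal sketch of the preemption state machine implemented above and in
// the Transition*() helpers below:
//
//           queued message           kPreemptWaitTimeMs timer
//   IDLE -------------------> WAITING -----------------------> CHECKING
//    ^                                                             |
//    | caught up, or                      oldest IPC older than    |
//    | kMaxPreemptTimeMs expired          kPreemptWaitTimeMs       v
//    +---------------------- PREEMPTING <--------------------------+
//                                ^  |
//            stub rescheduled    |  | a stub descheduled
//                                |  v
//                   WOULD_PREEMPT_DESCHEDULED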
void GpuChannelMessageFilter::TransitionToIdleIfCaughtUp() {
  DCHECK(preemption_state_ == PREEMPTING ||
         preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
  base::TimeTicks next_tick = message_queue_->GetNextMessageTimeTick();
  if (next_tick.is_null()) {
    TransitionToIdle();
  } else {
    base::TimeDelta time_elapsed = base::TimeTicks::Now() - next_tick;
    if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
      TransitionToIdle();
  }
}
void GpuChannelMessageFilter::TransitionToIdle() {
  DCHECK(preemption_state_ == PREEMPTING ||
         preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
  // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
  timer_->Stop();

  preemption_state_ = IDLE;
  preempting_flag_->Reset();
  TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

  UpdatePreemptionState();
}
void GpuChannelMessageFilter::TransitionToWaiting() {
  DCHECK_EQ(preemption_state_, IDLE);
  DCHECK(!timer_->IsRunning());

  preemption_state_ = WAITING;
  timer_->Start(FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs), this,
                &GpuChannelMessageFilter::TransitionToChecking);
}
void GpuChannelMessageFilter::TransitionToChecking() {
  DCHECK_EQ(preemption_state_, WAITING);
  DCHECK(!timer_->IsRunning());

  preemption_state_ = CHECKING;
  max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
  UpdatePreemptionState();
}
void GpuChannelMessageFilter::TransitionToPreempting() {
  DCHECK(preemption_state_ == CHECKING ||
         preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
  DCHECK(!a_stub_is_descheduled_);

  // Stop any pending state update checks that we may have queued
  // while CHECKING.
  if (preemption_state_ == CHECKING)
    timer_->Stop();

  preemption_state_ = PREEMPTING;
  preempting_flag_->Set();
  TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

  timer_->Start(FROM_HERE, max_preemption_time_, this,
                &GpuChannelMessageFilter::TransitionToIdle);

  UpdatePreemptionState();
}
void GpuChannelMessageFilter::TransitionToWouldPreemptDescheduled() {
  DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING);
  DCHECK(a_stub_is_descheduled_);

  if (preemption_state_ == CHECKING) {
    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    timer_->Stop();
  } else {
    // Stop any TransitionToIdle() timers that we may have queued
    // while PREEMPTING.
    timer_->Stop();
    max_preemption_time_ = timer_->desired_run_time() - base::TimeTicks::Now();
    if (max_preemption_time_ < base::TimeDelta()) {
      TransitionToIdle();
      return;
    }
  }

  preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
  preempting_flag_->Reset();
  TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

  UpdatePreemptionState();
}
GpuChannel::StreamState::StreamState(int32 id, GpuStreamPriority priority)
    : id_(id), priority_(priority) {}

GpuChannel::StreamState::~StreamState() {}
void GpuChannel::StreamState::AddRoute(int32 route_id) {
  routes_.insert(route_id);
}

void GpuChannel::StreamState::RemoveRoute(int32 route_id) {
  routes_.erase(route_id);
}

bool GpuChannel::StreamState::HasRoute(int32 route_id) const {
  return routes_.find(route_id) != routes_.end();
}

bool GpuChannel::StreamState::HasRoutes() const {
  return !routes_.empty();
}
GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       base::SingleThreadTaskRunner* task_runner,
                       base::SingleThreadTaskRunner* io_task_runner,
                       int client_id,
                       uint64_t client_tracing_id,
                       bool software,
                       bool allow_future_sync_points,
                       bool allow_real_time_streams)
    : gpu_channel_manager_(gpu_channel_manager),
      channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")),
      client_id_(client_id),
      client_tracing_id_(client_tracing_id),
      task_runner_(task_runner),
      io_task_runner_(io_task_runner),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox
                           ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox)
                           : gpu::gles2::MailboxManager::Create()),
      subscription_ref_set_(new gpu::gles2::SubscriptionRefSet),
      pending_valuebuffer_state_(new gpu::ValueStateMap),
      watchdog_(watchdog),
      software_(software),
      num_stubs_descheduled_(0),
      allow_future_sync_points_(allow_future_sync_points),
      allow_real_time_streams_(allow_real_time_streams),
      weak_factory_(this) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  message_queue_ =
      GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner);

  filter_ = new GpuChannelMessageFilter(
      weak_factory_.GetWeakPtr(), message_queue_.get(),
      gpu_channel_manager_->sync_point_manager(), task_runner_.get(),
      allow_future_sync_points_);

  subscription_ref_set_->AddObserver(this);
}
GpuChannel::~GpuChannel() {
  // Clear stubs first because of dependencies.
  stubs_.clear();

  message_queue_->DeleteAndDisableMessages(gpu_channel_manager_);

  subscription_ref_set_->RemoveObserver(this);
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}
IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event) {
  DCHECK(shutdown_event);
  DCHECK(!channel_);

  IPC::ChannelHandle channel_handle(channel_id_);

  channel_ =
      IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_SERVER, this,
                               io_task_runner_, false, shutdown_event);

#if defined(OS_POSIX)
  // On POSIX, pass the renderer-side FD. Also mark it as auto-close so
  // that it gets closed after it has been sent.
  base::ScopedFD renderer_fd = channel_->TakeClientFileDescriptor();
  DCHECK(renderer_fd.is_valid());
  channel_handle.socket = base::FileDescriptor(renderer_fd.Pass());
#endif

  channel_->AddFilter(filter_.get());

  return channel_handle;
}
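
// Rough usage sketch (caller-side, outside this file): GpuChannelManager
// creates the channel, calls Init() with its shutdown event, and returns the
// resulting IPC::ChannelHandle so the requesting renderer can connect:
//
//   scoped_ptr<GpuChannel> channel(new GpuChannel(...));
//   IPC::ChannelHandle handle = channel->Init(shutdown_event);
//   // Hand |handle| back to the browser process, which forwards it to the
//   // renderer that requested the channel.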
base::ProcessId GpuChannel::GetClientPID() const {
  return channel_->GetPeerPID();
}

uint32_t GpuChannel::GetProcessedOrderNum() const {
  return message_queue_->processed_order_num();
}

uint32_t GpuChannel::GetUnprocessedOrderNum() const {
  return message_queue_->GetUnprocessedOrderNum();
}
bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  // All messages should be pushed to channel_messages_ and handled separately.
  NOTREACHED();
  return false;
}
void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}
bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());

  DVLOG(1) << "sending message @" << message << " on channel @" << this
           << " with type " << message->type();

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}
void GpuChannel::OnAddSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_AddSubscription(client_id_, target));
}

void GpuChannel::OnRemoveSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_RemoveSubscription(client_id_, target));
}
void GpuChannel::OnStubSchedulingChanged(GpuCommandBufferStub* stub,
                                         bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    ScheduleHandleMessage();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_, a_stub_is_descheduled));
    }
  }
}
CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  int32 share_group_id = init_params.share_group_id;
  GpuCommandBufferStub* share_group = stubs_.get(share_group_id);

  if (!share_group && share_group_id != MSG_ROUTING_NONE)
    return CREATE_COMMAND_BUFFER_FAILED;

  int32 stream_id = init_params.stream_id;
  GpuStreamPriority stream_priority = init_params.stream_priority;

  if (share_group && stream_id != share_group->stream_id())
    return CREATE_COMMAND_BUFFER_FAILED;

  if (!allow_real_time_streams_ &&
      stream_priority == GpuStreamPriority::REAL_TIME)
    return CREATE_COMMAND_BUFFER_FAILED;

  auto stream_it = streams_.find(stream_id);
  if (stream_it != streams_.end() &&
      stream_priority != GpuStreamPriority::INHERIT &&
      stream_priority != stream_it->second.priority()) {
    return CREATE_COMMAND_BUFFER_FAILED;
  }

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this, task_runner_.get(), share_group, window, mailbox_manager_.get(),
      subscription_ref_set_.get(), pending_valuebuffer_state_.get(),
      gfx::Size(), disallowed_features_, init_params.attribs,
      init_params.gpu_preference, use_virtualized_gl_context, stream_id,
      route_id, surface_id, watchdog_, software_, init_params.active_url));

  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);

  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }

  if (stream_it != streams_.end()) {
    stream_it->second.AddRoute(route_id);
  } else {
    StreamState stream(stream_id, stream_priority);
    stream.AddRoute(route_id);
    streams_.insert(std::make_pair(stream_id, stream));
  }

  stubs_.set(route_id, stub.Pass());
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}
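
// The checks above reject a create request when: (1) the share group route
// does not resolve to a live stub, (2) the requested stream differs from the
// share group's stream, (3) REAL_TIME priority is requested on a channel
// without allow_real_time_streams_, or (4) the stream already exists with a
// different, non-INHERIT priority. OnCreateOffscreenCommandBuffer() below
// applies the same validation.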
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.get(route_id);
}
void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}
void GpuChannel::MarkAllContextsLost() {
  for (auto& kv : stubs_)
    kv.second->MarkContextLost();
}
bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}
void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (auto& kv : stubs_)
    kv.second->SetPreemptByFlag(preempted_flag_);
}
void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}
bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder,
                                    OnCreateJpegDecoder)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}
void GpuChannel::HandleMessage() {
  // If we have been preempted by another channel, just post a task to wake up.
  if (preempted_flag_ && preempted_flag_->IsSet()) {
    ScheduleHandleMessage();
    return;
  }

  GpuChannelMessage* m = message_queue_->GetNextMessage();

  // TODO(sunnyps): This could be a DCHECK maybe?
  if (!m)
    return;

  current_order_num_ = m->order_number;
  const IPC::Message& message = m->message;
  int32_t routing_id = message.routing_id();
  GpuCommandBufferStub* stub = stubs_.get(routing_id);

  DCHECK(!stub || stub->IsScheduled());

  DVLOG(1) << "received message @" << &message << " on channel @" << this
           << " with type " << message.type();

  bool handled = false;

  if (routing_id == MSG_ROUTING_CONTROL) {
    handled = OnControlMessageReceived(message);
  } else if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
    // TODO(dyen): Temporary handling of old sync points.
    // This must ensure that the sync point will be retired. Normally we'll
    // find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (stub) {
      stub->AddSyncPoint(m->sync_point, m->retire_sync_point);
    } else {
      gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(
          m->sync_point);
    }
    handled = true;
  } else {
    handled = router_.RouteMessage(message);
  }

  // Respond to sync messages even if router failed to route.
  if (!handled && message.is_sync()) {
    IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
    reply->set_reply_error();
    Send(reply);
  }

  // A command buffer may be descheduled or preempted but only in the middle of
  // a flush. In this case we should not pop the message from the queue.
  if (stub && stub->HasUnprocessedCommands()) {
    DCHECK_EQ((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID, message.type());
    // If the stub is still scheduled then we were preempted and need to
    // schedule a wakeup, otherwise some other event will wake us up, e.g. sync
    // point completion. No DCHECK for preemption flag because that can change
    // any time.
    if (stub->IsScheduled())
      ScheduleHandleMessage();
    return;
  }

  if (message_queue_->MessageProcessed())
    ScheduleHandleMessage();

  if (preempting_flag_) {
    io_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_));
  }
}
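
// HandleMessage() deliberately processes one message per main-thread task so
// a preempted or descheduled channel yields quickly: the tail of the function
// re-posts itself only while messages remain, and it pings the IO-thread
// filter after each message so the preemption state machine can relax as the
// queue drains.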
void GpuChannel::ScheduleHandleMessage() {
  task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage,
                                               weak_factory_.GetWeakPtr()));
}
void GpuChannel::HandleOutOfOrderMessage(const IPC::Message& msg) {
  switch (msg.type()) {
    case GpuCommandBufferMsg_WaitForGetOffsetInRange::ID:
    case GpuCommandBufferMsg_WaitForTokenInRange::ID:
      router_.RouteMessage(msg);
      break;
    default:
      NOTREACHED();
  }
}
void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id",
               route_id);

  int32 share_group_id = init_params.share_group_id;
  GpuCommandBufferStub* share_group = stubs_.get(share_group_id);

  if (!share_group && share_group_id != MSG_ROUTING_NONE) {
    *succeeded = false;
    return;
  }

  int32 stream_id = init_params.stream_id;
  GpuStreamPriority stream_priority = init_params.stream_priority;

  if (share_group && stream_id != share_group->stream_id()) {
    *succeeded = false;
    return;
  }

  if (!allow_real_time_streams_ &&
      stream_priority == GpuStreamPriority::REAL_TIME) {
    *succeeded = false;
    return;
  }

  auto stream_it = streams_.find(stream_id);
  if (stream_it != streams_.end() &&
      stream_priority != GpuStreamPriority::INHERIT &&
      stream_priority != stream_it->second.priority()) {
    *succeeded = false;
    return;
  }

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this, task_runner_.get(), share_group, gfx::GLSurfaceHandle(),
      mailbox_manager_.get(), subscription_ref_set_.get(),
      pending_valuebuffer_state_.get(), size, disallowed_features_,
      init_params.attribs, init_params.gpu_preference, false,
      init_params.stream_id, route_id, 0, watchdog_, software_,
      init_params.active_url));

  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);

  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }

  if (stream_it != streams_.end()) {
    stream_it->second.AddRoute(route_id);
  } else {
    StreamState stream(stream_id, stream_priority);
    stream.AddRoute(route_id);
    streams_.insert(std::make_pair(stream_id, stream));
  }

  stubs_.set(route_id, stub.Pass());
  *succeeded = true;
}
void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  scoped_ptr<GpuCommandBufferStub> stub = stubs_.take_and_erase(route_id);
  if (!stub)
    return;

  router_.RemoveRoute(route_id);

  int32 stream_id = stub->stream_id();
  auto stream_it = streams_.find(stream_id);
  DCHECK(stream_it != streams_.end());
  stream_it->second.RemoveRoute(route_id);
  if (!stream_it->second.HasRoutes())
    streams_.erase(stream_it);

  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (!stub->IsScheduled()) {
    // This stub won't get a chance to reschedule, so update the count now.
    OnStubSchedulingChanged(stub.get(), true);
  }
}
void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) {
  if (!jpeg_decoder_) {
    jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_));
  }
  jpeg_decoder_->AddClient(route_id, reply_msg);
}
void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}
void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  io_task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter,
                            filter_, make_scoped_refptr(filter)));
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  io_task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter,
                            filter_, make_scoped_refptr(filter)));
}
uint64 GpuChannel::GetMemoryUsage() {
  // Collect the unique memory trackers in use by the |stubs_|.
  std::set<gpu::gles2::MemoryTracker*> unique_memory_trackers;
  for (auto& kv : stubs_)
    unique_memory_trackers.insert(kv.second->GetMemoryTracker());

  // Sum the memory usage for all unique memory trackers.
  uint64 size = 0;
  for (auto* tracker : unique_memory_trackers) {
    size += gpu_channel_manager()->gpu_memory_manager()->GetTrackerMemoryUsage(
        tracker);
  }

  return size;
}
scoped_refptr<gfx::GLImage> GpuChannel::CreateImageForGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    gfx::BufferFormat format,
    uint32 internalformat) {
  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      scoped_refptr<gfx::GLImageSharedMemory> image(
          new gfx::GLImageSharedMemory(size, internalformat));
      if (!image->Initialize(handle, format))
        return scoped_refptr<gfx::GLImage>();

      return image;
    }
    default: {
      GpuChannelManager* manager = gpu_channel_manager();
      if (!manager->gpu_memory_buffer_factory())
        return scoped_refptr<gfx::GLImage>();

      return manager->gpu_memory_buffer_factory()
          ->AsImageFactory()
          ->CreateImageForGpuMemoryBuffer(handle,
                                          size,
                                          format,
                                          internalformat,
                                          client_id_);
    }
  }
}
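
// Only SHARED_MEMORY_BUFFER handles are wrapped directly here; every other
// handle type is delegated to the GpuMemoryBufferFactory's ImageFactory,
// which implements the platform-specific (e.g. native pixmap or IOSurface)
// image creation.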
void GpuChannel::HandleUpdateValueState(
    unsigned int target, const gpu::ValueState& state) {
  pending_valuebuffer_state_->UpdateState(target, state);
}

}  // namespace content