// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/gpu_channel.h"

#include "base/atomicops.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/synchronization/lock.h"
#include "base/thread_task_runner_handle.h"
#include "base/timer/timer.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image_shared_memory.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;

const uint32_t kOutOfOrderNumber = static_cast<uint32_t>(-1);

}  // anonymous namespace

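// GpuChannelMessageQueue holds a channel's incoming messages. It is filled
// from the IO thread by the message filter and drained on the main thread by
// GpuChannel::HandleMessage(), so every access goes through
// channel_messages_lock_.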
scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create(
    const base::WeakPtr<GpuChannel>& gpu_channel,
    base::SingleThreadTaskRunner* task_runner) {
  return new GpuChannelMessageQueue(gpu_channel, task_runner);
}

GpuChannelMessageQueue::GpuChannelMessageQueue(
    const base::WeakPtr<GpuChannel>& gpu_channel,
    base::SingleThreadTaskRunner* task_runner)
    : enabled_(true),
      unprocessed_order_num_(0),
      processed_order_num_(0),
      gpu_channel_(gpu_channel),
      task_runner_(task_runner) {}

GpuChannelMessageQueue::~GpuChannelMessageQueue() {
  DCHECK(channel_messages_.empty());
  DCHECK(out_of_order_messages_.empty());
}

uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const {
  base::AutoLock auto_lock(channel_messages_lock_);
  return unprocessed_order_num_;
}

void GpuChannelMessageQueue::PushBackMessage(uint32_t order_number,
                                             const IPC::Message& message) {
  base::AutoLock auto_lock(channel_messages_lock_);
  if (enabled_) {
    PushMessageHelper(
        make_scoped_ptr(new GpuChannelMessage(order_number, message)));
  }
}

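// Generating the sync point and queueing the message happen under a single
// lock acquisition so that DeleteAndDisableMessages() on the main thread
// cannot observe a generated-but-unqueued sync point: the point is either
// queued here or retired during teardown.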
bool GpuChannelMessageQueue::GenerateSyncPointMessage(
    gpu::SyncPointManager* sync_point_manager,
    uint32_t order_number,
    const IPC::Message& message,
    bool retire_sync_point,
    uint32_t* sync_point) {
  DCHECK_EQ((uint32_t)GpuCommandBufferMsg_InsertSyncPoint::ID, message.type());
  base::AutoLock auto_lock(channel_messages_lock_);
  if (!enabled_)
    return false;

  *sync_point = sync_point_manager->GenerateSyncPoint();

  scoped_ptr<GpuChannelMessage> msg(
      new GpuChannelMessage(order_number, message));
  msg->retire_sync_point = retire_sync_point;
  msg->sync_point = *sync_point;

  PushMessageHelper(msg.Pass());
  return true;
}

bool GpuChannelMessageQueue::HasQueuedMessages() const {
  base::AutoLock auto_lock(channel_messages_lock_);
  return HasQueuedMessagesHelper();
}

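// Returns the arrival time of the oldest pending message across both queues;
// the preemption logic on the IO thread compares this tick against
// kPreemptWaitTimeMs and kStopPreemptThresholdMs.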
base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const {
  base::AutoLock auto_lock(channel_messages_lock_);

  base::TimeTicks next_message_tick;
  if (!channel_messages_.empty())
    next_message_tick = channel_messages_.front()->time_received;

  base::TimeTicks next_out_of_order_tick;
  if (!out_of_order_messages_.empty())
    next_out_of_order_tick = out_of_order_messages_.front()->time_received;

  if (next_message_tick.is_null())
    return next_out_of_order_tick;
  else if (next_out_of_order_tick.is_null())
    return next_message_tick;

  return std::min(next_message_tick, next_out_of_order_tick);
}

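// Out-of-order messages (the wait IPCs tagged with kOutOfOrderNumber) always
// take precedence over the ordered queue.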
GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const {
  base::AutoLock auto_lock(channel_messages_lock_);
  if (!out_of_order_messages_.empty()) {
    DCHECK_EQ(out_of_order_messages_.front()->order_number, kOutOfOrderNumber);
    return out_of_order_messages_.front();
  } else if (!channel_messages_.empty()) {
    DCHECK_GT(channel_messages_.front()->order_number, processed_order_num_);
    DCHECK_LE(channel_messages_.front()->order_number, unprocessed_order_num_);
    return channel_messages_.front();
  }
  return nullptr;
}

bool GpuChannelMessageQueue::MessageProcessed(uint32_t order_number) {
  base::AutoLock auto_lock(channel_messages_lock_);
  if (order_number != kOutOfOrderNumber) {
    DCHECK(!channel_messages_.empty());
    scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
    channel_messages_.pop_front();
    DCHECK_EQ(order_number, msg->order_number);
    processed_order_num_ = order_number;
  } else {
    DCHECK(!out_of_order_messages_.empty());
    scoped_ptr<GpuChannelMessage> msg(out_of_order_messages_.front());
    out_of_order_messages_.pop_front();
  }
  return HasQueuedMessagesHelper();
}

void GpuChannelMessageQueue::DeleteAndDisableMessages(
    GpuChannelManager* gpu_channel_manager) {
  {
    base::AutoLock auto_lock(channel_messages_lock_);
    DCHECK(enabled_);
    enabled_ = false;
  }

  // We guarantee that the queues will no longer be modified after enabled_
  // is set to false, so it is now safe to modify them without the lock.
  // All public-facing modifying functions check enabled_, while all
  // private modifying functions DCHECK(enabled_) to enforce this.
  while (!channel_messages_.empty()) {
    scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
    channel_messages_.pop_front();
    // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and
    // GpuCommandBufferMsg_RetireSyncPoint messages, so it is safer to just
    // check whether we have a sync point number here.
    if (msg->sync_point) {
      gpu_channel_manager->sync_point_manager()->RetireSyncPoint(
          msg->sync_point);
    }
  }
  STLDeleteElements(&out_of_order_messages_);
}

void GpuChannelMessageQueue::ScheduleHandleMessage() {
  task_runner_->PostTask(FROM_HERE,
                         base::Bind(&GpuChannel::HandleMessage, gpu_channel_));
}

void GpuChannelMessageQueue::PushMessageHelper(
    scoped_ptr<GpuChannelMessage> msg) {
  channel_messages_lock_.AssertAcquired();
  DCHECK(enabled_);
  bool had_messages = HasQueuedMessagesHelper();
  if (msg->order_number != kOutOfOrderNumber) {
    unprocessed_order_num_ = msg->order_number;
    channel_messages_.push_back(msg.release());
  } else {
    out_of_order_messages_.push_back(msg.release());
  }
  // Only schedule a handler when the queue transitions from empty to
  // non-empty; otherwise one is already pending.
  if (!had_messages)
    ScheduleHandleMessage();
}

bool GpuChannelMessageQueue::HasQueuedMessagesHelper() const {
  channel_messages_lock_.AssertAcquired();
  return !channel_messages_.empty() || !out_of_order_messages_.empty();
}

// Begin order numbers at 1 so 0 can mean no orders.
uint32_t GpuChannelMessageFilter::global_order_counter_ = 1;

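// The filter runs on the IO thread. It stamps every incoming message with a
// global order number, answers GpuCommandBufferMsg_InsertSyncPoint
// synchronously, and drives the preemption state machine below.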
GpuChannelMessageFilter::GpuChannelMessageFilter(
    GpuChannelMessageQueue* message_queue,
    gpu::SyncPointManager* sync_point_manager,
    base::SingleThreadTaskRunner* task_runner,
    bool future_sync_points)
    : preemption_state_(IDLE),
      message_queue_(message_queue),
      sender_(nullptr),
      peer_pid_(base::kNullProcessId),
      sync_point_manager_(sync_point_manager),
      task_runner_(task_runner),
      a_stub_is_descheduled_(false),
      future_sync_points_(future_sync_points) {}

GpuChannelMessageFilter::~GpuChannelMessageFilter() {}

void GpuChannelMessageFilter::OnFilterAdded(IPC::Sender* sender) {
  DCHECK(!sender_);
  sender_ = sender;
  timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>);
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnFilterAdded(sender_);
  }
}

void GpuChannelMessageFilter::OnFilterRemoved() {
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnFilterRemoved();
  }
  sender_ = nullptr;
  peer_pid_ = base::kNullProcessId;
}

void GpuChannelMessageFilter::OnChannelConnected(int32 peer_pid) {
  DCHECK(peer_pid_ == base::kNullProcessId);
  peer_pid_ = peer_pid;
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnChannelConnected(peer_pid);
  }
}

void GpuChannelMessageFilter::OnChannelError() {
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnChannelError();
  }
}

void GpuChannelMessageFilter::OnChannelClosing() {
  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    filter->OnChannelClosing();
  }
}

void GpuChannelMessageFilter::AddChannelFilter(
    scoped_refptr<IPC::MessageFilter> filter) {
  channel_filters_.push_back(filter);
  if (sender_)
    filter->OnFilterAdded(sender_);
  if (peer_pid_ != base::kNullProcessId)
    filter->OnChannelConnected(peer_pid_);
}

void GpuChannelMessageFilter::RemoveChannelFilter(
    scoped_refptr<IPC::MessageFilter> filter) {
  if (sender_)
    filter->OnFilterRemoved();
  channel_filters_.erase(
      std::find(channel_filters_.begin(), channel_filters_.end(), filter));
}

bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
  DCHECK(sender_);

  for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
    if (filter->OnMessageReceived(message)) {
      return true;
    }
  }

  const uint32_t order_number = global_order_counter_++;
  bool handled = false;
  if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
      !future_sync_points_) {
    DLOG(ERROR) << "Untrusted client should not send "
                   "GpuCommandBufferMsg_RetireSyncPoint message";
    return true;
  }

  if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
    base::Tuple<bool> params;
    IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
    if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                            &params)) {
      reply->set_reply_error();
      Send(reply);
      return true;
    }
    bool retire_sync_point = base::get<0>(params);
    if (!future_sync_points_ && !retire_sync_point) {
      LOG(ERROR) << "Untrusted contexts can't create future sync points";
      reply->set_reply_error();
      Send(reply);
      return true;
    }

    // The message queue must handle the entire sync point generation because
    // the message queue could be disabled from the main thread during
    // generation.
    uint32_t sync_point = 0u;
    if (!message_queue_->GenerateSyncPointMessage(
            sync_point_manager_, order_number, message, retire_sync_point,
            &sync_point)) {
      LOG(ERROR) << "GpuChannel has been destroyed.";
      reply->set_reply_error();
      Send(reply);
      return true;
    }

    DCHECK_NE(sync_point, 0u);
    GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
    Send(reply);
    handled = true;
  }

  // Forward all other messages to the GPU channel.
  if (!handled && !message.is_reply() && !message.should_unblock()) {
    if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
        message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
      // Move Wait commands to the head of the queue, so the renderer
      // doesn't have to wait any longer than necessary.
      message_queue_->PushBackMessage(kOutOfOrderNumber, message);
    } else {
      message_queue_->PushBackMessage(order_number, message);
    }
    handled = true;
  }

  UpdatePreemptionState();
  return handled;
}

void GpuChannelMessageFilter::OnMessageProcessed() {
  UpdatePreemptionState();
}

void GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState(
    gpu::PreemptionFlag* preempting_flag,
    bool a_stub_is_descheduled) {
  preempting_flag_ = preempting_flag;
  a_stub_is_descheduled_ = a_stub_is_descheduled;
}

void GpuChannelMessageFilter::UpdateStubSchedulingState(
    bool a_stub_is_descheduled) {
  a_stub_is_descheduled_ = a_stub_is_descheduled;
  UpdatePreemptionState();
}

bool GpuChannelMessageFilter::Send(IPC::Message* message) {
  return sender_->Send(message);
}

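// Preemption state machine, driven from the IO thread:
//   IDLE -> WAITING        when messages are queued and a preempting flag has
//                          been set up,
//   WAITING -> CHECKING    after kPreemptWaitTimeMs,
//   CHECKING -> PREEMPTING once the oldest pending IPC has waited at least
//                          kPreemptWaitTimeMs,
//   PREEMPTING -> IDLE     when caught up, or after kMaxPreemptTimeMs,
//   WOULD_PREEMPT_DESCHEDULED parks the preemption while a stub on this
//                          channel is descheduled.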
void GpuChannelMessageFilter::UpdatePreemptionState() {
  switch (preemption_state_) {
    case IDLE:
      if (preempting_flag_.get() && message_queue_->HasQueuedMessages())
        TransitionToWaiting();
      break;
    case WAITING:
      // A timer will transition us to CHECKING.
      DCHECK(timer_->IsRunning());
      break;
    case CHECKING: {
      base::TimeTicks time_tick = message_queue_->GetNextMessageTimeTick();
      if (!time_tick.is_null()) {
        base::TimeDelta time_elapsed = base::TimeTicks::Now() - time_tick;
        if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
          // Schedule another check for when the IPC may go long.
          timer_->Start(FROM_HERE,
                        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                            time_elapsed,
                        this, &GpuChannelMessageFilter::UpdatePreemptionState);
        } else {
          if (a_stub_is_descheduled_)
            TransitionToWouldPreemptDescheduled();
          else
            TransitionToPreempting();
        }
      }
      break;
    }
    case PREEMPTING:
      // A TransitionToIdle() timer should always be running in this state.
      DCHECK(timer_->IsRunning());
      if (a_stub_is_descheduled_)
        TransitionToWouldPreemptDescheduled();
      else
        TransitionToIdleIfCaughtUp();
      break;
    case WOULD_PREEMPT_DESCHEDULED:
      // A TransitionToIdle() timer should never be running in this state.
      DCHECK(!timer_->IsRunning());
      if (!a_stub_is_descheduled_)
        TransitionToPreempting();
      else
        TransitionToIdleIfCaughtUp();
      break;
    default:
      NOTREACHED();
  }
}

void GpuChannelMessageFilter::TransitionToIdleIfCaughtUp() {
  DCHECK(preemption_state_ == PREEMPTING ||
         preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
  base::TimeTicks next_tick = message_queue_->GetNextMessageTimeTick();
  if (next_tick.is_null()) {
    TransitionToIdle();
  } else {
    base::TimeDelta time_elapsed = base::TimeTicks::Now() - next_tick;
    if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
      TransitionToIdle();
  }
}

void GpuChannelMessageFilter::TransitionToIdle() {
  DCHECK(preemption_state_ == PREEMPTING ||
         preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
  // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
  timer_->Stop();

  preemption_state_ = IDLE;
  preempting_flag_->Reset();
  TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

  UpdatePreemptionState();
}

void GpuChannelMessageFilter::TransitionToWaiting() {
  DCHECK_EQ(preemption_state_, IDLE);
  DCHECK(!timer_->IsRunning());

  preemption_state_ = WAITING;
  timer_->Start(FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs), this,
                &GpuChannelMessageFilter::TransitionToChecking);
}

void GpuChannelMessageFilter::TransitionToChecking() {
  DCHECK_EQ(preemption_state_, WAITING);
  DCHECK(!timer_->IsRunning());

  preemption_state_ = CHECKING;
  max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
  UpdatePreemptionState();
}

void GpuChannelMessageFilter::TransitionToPreempting() {
  DCHECK(preemption_state_ == CHECKING ||
         preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
  DCHECK(!a_stub_is_descheduled_);

  // Stop any pending state update checks that we may have queued
  // while CHECKING.
  if (preemption_state_ == CHECKING)
    timer_->Stop();

  preemption_state_ = PREEMPTING;
  preempting_flag_->Set();
  TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

  timer_->Start(FROM_HERE, max_preemption_time_, this,
                &GpuChannelMessageFilter::TransitionToIdle);

  UpdatePreemptionState();
}

void GpuChannelMessageFilter::TransitionToWouldPreemptDescheduled() {
  DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING);
  DCHECK(a_stub_is_descheduled_);

  if (preemption_state_ == CHECKING) {
    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    timer_->Stop();
  } else {
    // Stop any TransitionToIdle() timers that we may have queued
    // while PREEMPTING.
    timer_->Stop();
    max_preemption_time_ = timer_->desired_run_time() - base::TimeTicks::Now();
    if (max_preemption_time_ < base::TimeDelta()) {
      TransitionToIdle();
      return;
    }
  }

  preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
  preempting_flag_->Reset();
  TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

  UpdatePreemptionState();
}

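// StreamState tracks which command buffer routes belong to a stream and the
// stream's scheduling priority.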
GpuChannel::StreamState::StreamState(int32 id, GpuStreamPriority priority)
    : id_(id), priority_(priority) {}

GpuChannel::StreamState::~StreamState() {}

void GpuChannel::StreamState::AddRoute(int32 route_id) {
  routes_.insert(route_id);
}

void GpuChannel::StreamState::RemoveRoute(int32 route_id) {
  routes_.erase(route_id);
}

bool GpuChannel::StreamState::HasRoute(int32 route_id) const {
  return routes_.find(route_id) != routes_.end();
}

bool GpuChannel::StreamState::HasRoutes() const {
  return !routes_.empty();
}

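// GpuChannel owns the IPC channel to a single client process, the command
// buffer stubs created over it, and the per-stream bookkeeping used for
// scheduling.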
GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       base::SingleThreadTaskRunner* task_runner,
                       base::SingleThreadTaskRunner* io_task_runner,
                       int client_id,
                       uint64_t client_tracing_id,
                       bool software,
                       bool allow_future_sync_points,
                       bool allow_real_time_streams)
    : gpu_channel_manager_(gpu_channel_manager),
      channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")),
      client_id_(client_id),
      client_tracing_id_(client_tracing_id),
      task_runner_(task_runner),
      io_task_runner_(io_task_runner),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox
                           ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox)
                           : gpu::gles2::MailboxManager::Create()),
      subscription_ref_set_(new gpu::gles2::SubscriptionRefSet),
      pending_valuebuffer_state_(new gpu::ValueStateMap),
      watchdog_(watchdog),
      software_(software),
      num_stubs_descheduled_(0),
      allow_future_sync_points_(allow_future_sync_points),
      allow_real_time_streams_(allow_real_time_streams),
      weak_factory_(this) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  message_queue_ =
      GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner);

  filter_ = new GpuChannelMessageFilter(
      message_queue_.get(), gpu_channel_manager_->sync_point_manager(),
      task_runner_.get(), allow_future_sync_points_);

  subscription_ref_set_->AddObserver(this);
}

GpuChannel::~GpuChannel() {
  // Clear stubs first because of dependencies.
  stubs_.clear();

  message_queue_->DeleteAndDisableMessages(gpu_channel_manager_);

  subscription_ref_set_->RemoveObserver(this);
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event) {
  DCHECK(shutdown_event);
  DCHECK(!channel_);

  IPC::ChannelHandle channel_handle(channel_id_);

  channel_ =
      IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_SERVER, this,
                               io_task_runner_, false, shutdown_event);

#if defined(OS_POSIX)
  // On POSIX, pass the renderer-side FD. Also mark it as auto-close so
  // that it gets closed after it has been sent.
  base::ScopedFD renderer_fd = channel_->TakeClientFileDescriptor();
  DCHECK(renderer_fd.is_valid());
  channel_handle.socket = base::FileDescriptor(renderer_fd.Pass());
#endif

  channel_->AddFilter(filter_.get());

  return channel_handle;
}

base::ProcessId GpuChannel::GetClientPID() const {
  return channel_->GetPeerPID();
}

uint32_t GpuChannel::GetProcessedOrderNum() const {
  return message_queue_->processed_order_num();
}

uint32_t GpuChannel::GetUnprocessedOrderNum() const {
  return message_queue_->GetUnprocessedOrderNum();
}

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  // All messages should be pushed to channel_messages_ and handled separately.
  NOTREACHED();
  return false;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());

  DVLOG(1) << "sending message @" << message << " on channel @" << this
           << " with type " << message->type();

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

void GpuChannel::OnAddSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_AddSubscription(client_id_, target));
}

void GpuChannel::OnRemoveSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_RemoveSubscription(client_id_, target));
}

void GpuChannel::OnStubSchedulingChanged(GpuCommandBufferStub* stub,
                                         bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    ScheduleHandleMessage();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_, a_stub_is_descheduled));
    }
  }
}

CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::CreateViewCommandBuffer", "surface_id",
               surface_id);

  int32 share_group_id = init_params.share_group_id;
  GpuCommandBufferStub* share_group = stubs_.get(share_group_id);

  if (!share_group && share_group_id != MSG_ROUTING_NONE)
    return CREATE_COMMAND_BUFFER_FAILED;

  int32 stream_id = init_params.stream_id;
  GpuStreamPriority stream_priority = init_params.stream_priority;

  if (share_group && stream_id != share_group->stream_id())
    return CREATE_COMMAND_BUFFER_FAILED;

  if (!allow_real_time_streams_ &&
      stream_priority == GpuStreamPriority::REAL_TIME)
    return CREATE_COMMAND_BUFFER_FAILED;

  auto stream_it = streams_.find(stream_id);
  if (stream_it != streams_.end() &&
      stream_priority != GpuStreamPriority::INHERIT &&
      stream_priority != stream_it->second.priority()) {
    return CREATE_COMMAND_BUFFER_FAILED;
  }

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this, task_runner_.get(), share_group, window, mailbox_manager_.get(),
      subscription_ref_set_.get(), pending_valuebuffer_state_.get(),
      gfx::Size(), disallowed_features_, init_params.attribs,
      init_params.gpu_preference, use_virtualized_gl_context, stream_id,
      route_id, surface_id, watchdog_, software_, init_params.active_url));

  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);

  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }

  if (stream_it != streams_.end()) {
    stream_it->second.AddRoute(route_id);
  } else {
    StreamState stream(stream_id, stream_priority);
    stream.AddRoute(route_id);
    streams_.insert(std::make_pair(stream_id, stream));
  }

  stubs_.set(route_id, stub.Pass());
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.get(route_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (auto& kv : stubs_)
    kv.second->MarkContextLost();
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (auto& kv : stubs_)
    kv.second->SetPreemptByFlag(preempted_flag_);
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder,
                                    OnCreateJpegDecoder)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

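// Runs on the main thread. Handles one queued message per invocation and
// reposts itself while messages remain, so a single channel cannot starve
// the rest of the GPU process.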
void GpuChannel::HandleMessage() {
  // If we have been preempted by another channel, just post a task to wake up.
  if (preempted_flag_ && preempted_flag_->IsSet()) {
    ScheduleHandleMessage();
    return;
  }

  GpuChannelMessage* m = message_queue_->GetNextMessage();

  // TODO(sunnyps): This could be a DCHECK maybe?
  if (!m)
    return;

  uint32_t order_number = m->order_number;
  const IPC::Message& message = m->message;
  int32_t routing_id = message.routing_id();
  GpuCommandBufferStub* stub = stubs_.get(routing_id);

  DCHECK(!stub || stub->IsScheduled());

  DVLOG(1) << "received message @" << &message << " on channel @" << this
           << " with type " << message.type();

  current_order_num_ = order_number;

  bool handled = false;

  if (routing_id == MSG_ROUTING_CONTROL) {
    handled = OnControlMessageReceived(message);
  } else if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
    // TODO(dyen): Temporary handling of old sync points.
    // This must ensure that the sync point will be retired. Normally we'll
    // find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (stub) {
      stub->AddSyncPoint(m->sync_point, m->retire_sync_point);
    } else {
      gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(
          m->sync_point);
    }
    handled = true;
  } else {
    handled = router_.RouteMessage(message);
  }

  // Respond to sync messages even if the router failed to route.
  if (!handled && message.is_sync()) {
    IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
    reply->set_reply_error();
    Send(reply);
  }

  // A command buffer may be descheduled or preempted but only in the middle of
  // a flush. In this case we should not pop the message from the queue.
  if (stub && stub->HasUnprocessedCommands() &&
      order_number != kOutOfOrderNumber) {
    DCHECK_EQ((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID, message.type());
    // If the stub is still scheduled then we were preempted and need to
    // schedule a wakeup; otherwise some other event will wake us up, e.g.
    // sync point completion. No DCHECK for the preemption flag because that
    // can change at any time.
    if (stub->IsScheduled())
      ScheduleHandleMessage();
    return;
  }

  if (message_queue_->MessageProcessed(order_number))
    ScheduleHandleMessage();

  if (preempting_flag_) {
    io_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_));
  }
}

void GpuChannel::ScheduleHandleMessage() {
  task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage,
                                               weak_factory_.GetWeakPtr()));
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id",
               route_id);

  int32 share_group_id = init_params.share_group_id;
  GpuCommandBufferStub* share_group = stubs_.get(share_group_id);

  if (!share_group && share_group_id != MSG_ROUTING_NONE) {
    *succeeded = false;
    return;
  }

  int32 stream_id = init_params.stream_id;
  GpuStreamPriority stream_priority = init_params.stream_priority;

  if (share_group && stream_id != share_group->stream_id()) {
    *succeeded = false;
    return;
  }

  if (!allow_real_time_streams_ &&
      stream_priority == GpuStreamPriority::REAL_TIME) {
    *succeeded = false;
    return;
  }

  auto stream_it = streams_.find(stream_id);
  if (stream_it != streams_.end() &&
      stream_priority != GpuStreamPriority::INHERIT &&
      stream_priority != stream_it->second.priority()) {
    *succeeded = false;
    return;
  }

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this, task_runner_.get(), share_group, gfx::GLSurfaceHandle(),
      mailbox_manager_.get(), subscription_ref_set_.get(),
      pending_valuebuffer_state_.get(), size, disallowed_features_,
      init_params.attribs, init_params.gpu_preference, false,
      init_params.stream_id, route_id, 0, watchdog_, software_,
      init_params.active_url));

  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);

  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }

  if (stream_it != streams_.end()) {
    stream_it->second.AddRoute(route_id);
  } else {
    StreamState stream(stream_id, stream_priority);
    stream.AddRoute(route_id);
    streams_.insert(std::make_pair(stream_id, stream));
  }

  stubs_.set(route_id, stub.Pass());
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  scoped_ptr<GpuCommandBufferStub> stub = stubs_.take_and_erase(route_id);
  if (!stub)
    return;

  router_.RemoveRoute(route_id);

  int32 stream_id = stub->stream_id();
  auto stream_it = streams_.find(stream_id);
  DCHECK(stream_it != streams_.end());
  stream_it->second.RemoveRoute(route_id);
  if (!stream_it->second.HasRoutes())
    streams_.erase(stream_it);

  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (!stub->IsScheduled()) {
    // This stub won't get a chance to reschedule, so update the count now.
    OnStubSchedulingChanged(stub.get(), true);
  }
}

void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) {
  if (!jpeg_decoder_) {
    jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_));
  }
  jpeg_decoder_->AddClient(route_id, reply_msg);
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  io_task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter,
                            filter_, make_scoped_refptr(filter)));
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  io_task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter,
                            filter_, make_scoped_refptr(filter)));
}

uint64 GpuChannel::GetMemoryUsage() {
  // Collect the unique memory trackers in use by the |stubs_|.
  std::set<gpu::gles2::MemoryTracker*> unique_memory_trackers;
  for (auto& kv : stubs_)
    unique_memory_trackers.insert(kv.second->GetMemoryTracker());

  // Sum the memory usage for all unique memory trackers.
  uint64 size = 0;
  for (auto* tracker : unique_memory_trackers) {
    size += gpu_channel_manager()->gpu_memory_manager()->GetTrackerMemoryUsage(
        tracker);
  }
  return size;
}

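// Shared memory buffers are wrapped directly in a GLImageSharedMemory; all
// other buffer types are delegated to the GpuMemoryBufferFactory's image
// factory.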
scoped_refptr<gfx::GLImage> GpuChannel::CreateImageForGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    gfx::BufferFormat format,
    uint32 internalformat) {
  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      scoped_refptr<gfx::GLImageSharedMemory> image(
          new gfx::GLImageSharedMemory(size, internalformat));
      if (!image->Initialize(handle, format))
        return scoped_refptr<gfx::GLImage>();
      return image;
    }
    default: {
      GpuChannelManager* manager = gpu_channel_manager();
      if (!manager->gpu_memory_buffer_factory())
        return scoped_refptr<gfx::GLImage>();

      return manager->gpu_memory_buffer_factory()
          ->AsImageFactory()
          ->CreateImageForGpuMemoryBuffer(handle, size, format, internalformat,
                                          client_id_);
    }
  }
}

void GpuChannel::HandleUpdateValueState(
    unsigned int target, const gpu::ValueState& state) {
  pending_valuebuffer_state_->UpdateState(target, state);
}

}  // namespace content