// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <algorithm>
#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/thread_task_runner_handle.h"
#include "base/timer/timer.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image_shared_memory.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
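
// Worked example of how these constants interact (an illustrative summary of
// the state machine below, not additional behavior): with a 17 ms vsync
// interval, an IPC that is still unprocessed 2 * 17 = 34 ms after arrival
// (kPreemptWaitTimeMs) triggers preemption of other channels. The preemption
// is then held for at most 17 ms (kMaxPreemptTimeMs) and is released early
// once the oldest pending IPC has been waiting less than 17 ms
// (kStopPreemptThresholdMs).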

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO
//   thread.
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(
      base::WeakPtr<GpuChannel> gpu_channel,
      gpu::SyncPointManager* sync_point_manager,
      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
      bool future_sync_points)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        sender_(NULL),
        sync_point_manager_(sync_point_manager),
        task_runner_(task_runner),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        future_sync_points_(future_sync_points) {}

  void OnFilterAdded(IPC::Sender* sender) override {
    DCHECK(!sender_);
    sender_ = sender;
  }

  void OnFilterRemoved() override {
    DCHECK(sender_);
    sender_ = NULL;
  }

  bool OnMessageReceived(const IPC::Message& message) override {
    DCHECK(sender_);

    bool handled = false;
    if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
        !future_sync_points_) {
      DLOG(ERROR) << "Untrusted client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      return true;
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      base::Tuple<bool> retire;
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                              &retire)) {
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      if (!future_sync_points_ && !base::get<0>(retire)) {
        LOG(ERROR) << "Untrusted contexts can't create future sync points";
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
                     gpu_channel_, sync_point_manager_, message.routing_id(),
                     base::get<0>(retire), sync_point));
      handled = true;
    }

    // These are handled by GpuJpegDecodeAccelerator and
    // GpuVideoDecodeAccelerator.
    // TODO(kcwu) Modify GpuChannel::AddFilter to handle additional filters by
    // GpuChannelMessageFilter instead of by IPC::SyncChannel directly. Then we
    // don't need to exclude them one by one here.
    if (message.type() == AcceleratedJpegDecoderMsg_Decode::ID ||
        message.type() == AcceleratedJpegDecoderMsg_Destroy::ID ||
        message.type() == AcceleratedVideoDecoderMsg_Decode::ID) {
      return false;
    }

    // All other messages get processed by the GpuChannel.
    messages_forwarded_to_channel_++;
    if (preempting_flag_.get())
      pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
    UpdatePreemptionState();

    return handled;
  }
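
  // The InsertSyncPoint round trip implemented above, summarized (a comment
  // sketch only; the behavior is defined by the code above and by
  // InsertSyncPointOnMainThread() below):
  //
  //   renderer            IO thread (this filter)          GPU main thread
  //   InsertSyncPoint --> GenerateSyncPoint(), reply sent
  //                       PostTask(InsertSyncPointOnMainThread)
  //                                                    --> stub->AddSyncPoint()
  //                                                        and, if |retire|,
  //                                                        queue RetireSyncPoint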

  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return sender_->Send(message);
  }

 protected:
  ~GpuChannelMessageFilter() override {}

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };
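
  // Summary of the transitions implemented by UpdatePreemptionState() and the
  // Transition*() methods below (a comment diagram, not additional behavior):
  //
  //   IDLE --(preempting flag set, messages pending)--> WAITING
  //   WAITING --(timer after kPreemptWaitTimeMs)--> CHECKING
  //   CHECKING --(oldest IPC waited >= kPreemptWaitTimeMs)--> PREEMPTING,
  //       or WOULD_PREEMPT_DESCHEDULED if a stub is descheduled
  //   PREEMPTING <--(stub scheduling state changes)--> WOULD_PREEMPT_DESCHEDULED
  //   PREEMPTING / WOULD_PREEMPT_DESCHEDULED --(caught up, or
  //       max_preemption_time_ exhausted)--> IDLE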

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel> gpu_channel,
      gpu::SyncPointManager* manager,
      int32 routing_id,
      bool retire,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync
    // point with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel) {
      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        if (retire) {
          GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
          gpu_channel->OnMessageReceived(message);
        }
        return;
      } else {
        gpu_channel->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
  // passed through - therefore the WeakPtr assumptions are respected.
  base::WeakPtr<GpuChannel> gpu_channel_;
  IPC::Sender* sender_;
  gpu::SyncPointManager* sync_point_manager_;
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software,
                       bool allow_future_sync_points)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox
                           ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox)
                           : gpu::gles2::MailboxManager::Create()),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      currently_processing_message_(NULL),
      num_stubs_descheduled_(0),
      allow_future_sync_points_(allow_future_sync_points),
      weak_factory_(this) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const base::CommandLine* command_line =
      base::CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);

  subscription_ref_set_ = new gpu::gles2::SubscriptionRefSet();
  subscription_ref_set_->AddObserver(this);
}

GpuChannel::~GpuChannel() {
  STLDeleteElements(&deferred_messages_);
  subscription_ref_set_->RemoveObserver(this);
  if (preempting_flag_.get())
    preempting_flag_->Reset();

  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
}

void GpuChannel::Init(base::SingleThreadTaskRunner* io_task_runner,
                      base::WaitableEvent* shutdown_event,
                      IPC::AttachmentBroker* broker) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_ =
      IPC::SyncChannel::Create(channel_id_, IPC::Channel::MODE_SERVER, this,
                               io_task_runner, false, shutdown_event, broker);

  filter_ = new GpuChannelMessageFilter(
      weak_factory_.GetWeakPtr(), gpu_channel_manager_->sync_point_manager(),
      base::ThreadTaskRunnerHandle::Get(), allow_future_sync_points_);
  io_task_runner_ = io_task_runner;
  channel_->AddFilter(filter_.get());
  pending_valuebuffer_state_ = new gpu::ValueStateMap();

  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, base::ThreadTaskRunnerHandle::Get());
}
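
// A minimal sketch of how a channel is created and initialized, based on the
// constructor and Init() above. This is an illustration only; the production
// call site lives in GpuChannelManager, not in this file, and
// |io_task_runner|, |shutdown_event| and |broker| are assumed to come from
// the caller:
//
//   scoped_ptr<GpuChannel> channel(new GpuChannel(
//       gpu_channel_manager, watchdog, share_group, mailbox_manager,
//       client_id, false /* software */,
//       false /* allow_future_sync_points */));
//   channel->Init(io_task_runner, shutdown_event, broker);
//   std::string channel_name = channel->GetChannelName();  // For the client.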

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
base::ScopedFD GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return base::ScopedFD();
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    // Move Wait commands to the head of the queue, so the renderer
    // doesn't have to wait any longer than necessary.
    deferred_messages_.push_front(new IPC::Message(message));
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

void GpuChannel::OnAddSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_AddSubscription(client_id_, target));
}

void GpuChannel::OnRemoveSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_RemoveSubscription(client_id_, target));
}

void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_, a_stub_is_descheduled));
    }
  }
}

CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               subscription_ref_set_.get(),
                               pending_valuebuffer_state_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }
  stubs_.AddWithID(stub.release(), route_id);
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}
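
// How the two preemption hooks above are intended to fit together (a sketch
// based only on the interfaces in this file; the actual wiring is done by the
// caller, outside this file): one channel's preempting flag is handed to
// another channel, whose stubs then yield while the first channel sets the
// flag because its IPCs are going unprocessed for too long.
//
//   scoped_refptr<gpu::PreemptionFlag> flag(
//       high_priority_channel->GetPreemptionFlag());
//   low_priority_channel->SetPreemptByFlag(flag);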

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder,
                                    OnCreateJpegDecoder)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  IPC::Message* m = NULL;
  GpuCommandBufferStub* stub = NULL;

  m = deferred_messages_.front();
  stub = stubs_.Lookup(m->routing_id());
  if (stub) {
    if (!stub->IsScheduled())
      return;
    if (stub->IsPreempted()) {
      OnScheduled();
      return;
    }
  }

  scoped_ptr<IPC::Message> message(m);
  deferred_messages_.pop_front();
  bool message_processed = true;

  currently_processing_message_ = message.get();
  bool result;
  if (message->routing_id() == MSG_ROUTING_CONTROL)
    result = OnControlMessageReceived(*message);
  else
    result = router_.RouteMessage(*message);
  currently_processing_message_ = NULL;

  if (!result) {
    // Respond to sync messages even if router failed to route.
    if (message->is_sync()) {
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
      reply->set_reply_error();
      Send(reply);
    }
  } else {
    // If the command buffer becomes unscheduled as a result of handling the
    // message but still has more commands to process, synthesize an IPC
    // message to flush that command buffer.
    if (stub) {
      if (stub->HasUnprocessedCommands()) {
        deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
            stub->route_id()));
        message_processed = false;
      }
    }
  }
  if (message_processed)
    MessageProcessed();

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      subscription_ref_set_.get(),
      pending_valuebuffer_state_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      route_id,
      0,  // surface_id: offscreen command buffers have no onscreen surface.
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }
  stubs_.AddWithID(stub.release(), route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = (stub && !stub->IsScheduled());
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) {
  if (!jpeg_decoder_) {
    jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_));
  }
  jpeg_decoder_->AddClient(route_id, reply_msg);
}

void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_task_runner_->PostTask(
        FROM_HERE, base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                              filter_, messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

uint64 GpuChannel::GetMemoryUsage() {
  // Collect the unique memory trackers in use by the |stubs_|.
  std::set<gpu::gles2::MemoryTracker*> unique_memory_trackers;
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    unique_memory_trackers.insert(it.GetCurrentValue()->GetMemoryTracker());
  }

  // Sum the memory usage for all unique memory trackers.
  uint64 size = 0;
  for (auto* tracker : unique_memory_trackers) {
    size += gpu_channel_manager()->gpu_memory_manager()->GetTrackerMemoryUsage(
        tracker);
  }

  return size;
}

scoped_refptr<gfx::GLImage> GpuChannel::CreateImageForGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    gfx::GpuMemoryBuffer::Format format,
    uint32 internalformat) {
  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      scoped_refptr<gfx::GLImageSharedMemory> image(
          new gfx::GLImageSharedMemory(size, internalformat));
      if (!image->Initialize(handle, format))
        return scoped_refptr<gfx::GLImage>();

      return image;
    }
    default: {
      GpuChannelManager* manager = gpu_channel_manager();
      if (!manager->gpu_memory_buffer_factory())
        return scoped_refptr<gfx::GLImage>();

      return manager->gpu_memory_buffer_factory()
          ->AsImageFactory()
          ->CreateImageForGpuMemoryBuffer(handle,
                                          size,
                                          format,
                                          internalformat,
                                          client_id_);
    }
  }
}

void GpuChannel::HandleUpdateValueState(
    unsigned int target, const gpu::ValueState& state) {
  pending_valuebuffer_state_->UpdateState(target, state);
}

bool GpuChannel::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd) {
  auto dump_name = GetChannelName();
  std::replace(dump_name.begin(), dump_name.end(), '.', '_');

  base::trace_event::MemoryAllocatorDump* dump =
      pmd->CreateAllocatorDump(base::StringPrintf("gl/%s", dump_name.c_str()));

  dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                  base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                  GetMemoryUsage());

  return true;
}

}  // namespace content