// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <algorithm>
#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/thread_task_runner_handle.h"
#include "base/timer/timer.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image_shared_memory.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsyncs. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
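
// With kVsyncIntervalMs = 17, a channel therefore waits 34 ms for a stalled
// IPC before preempting, preempts for at most 17 ms at a time, and stops
// preempting as soon as the oldest pending IPC is less than 17 ms old.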

}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO thread.
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(
      base::WeakPtr<GpuChannel> gpu_channel,
      scoped_refptr<gpu::SyncPointManager> sync_point_manager,
      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
      bool future_sync_points)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        sender_(NULL),
        sync_point_manager_(sync_point_manager),
        task_runner_(task_runner),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        future_sync_points_(future_sync_points) {}

  void OnFilterAdded(IPC::Sender* sender) override {
    DCHECK(!sender_);
    sender_ = sender;
  }

  void OnFilterRemoved() override {
    DCHECK(sender_);
    sender_ = NULL;
  }

  bool OnMessageReceived(const IPC::Message& message) override {
    DCHECK(sender_);

    bool handled = false;
    if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
        !future_sync_points_) {
      DLOG(ERROR) << "Untrusted client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      return true;
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      base::Tuple<bool> retire;
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                              &retire)) {
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      if (!future_sync_points_ && !base::get<0>(retire)) {
        LOG(ERROR) << "Untrusted contexts can't create future sync points";
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
                     gpu_channel_, sync_point_manager_, message.routing_id(),
                     base::get<0>(retire), sync_point));
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    messages_forwarded_to_channel_++;
    if (preempting_flag_.get())
      pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
    UpdatePreemptionState();

    return handled;
  }
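
  // Runs on the IO thread, posted from GpuChannel::MessageProcessed() on the
  // main thread. Drops every queued entry the channel has now handled and
  // re-evaluates whether preemption is still warranted.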
  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return sender_->Send(message);
  }

 protected:
  ~GpuChannelMessageFilter() override {}

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };
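
  // State transitions, as implemented by the TransitionTo*() helpers below:
  //   IDLE -> WAITING -> CHECKING -> PREEMPTING -> IDLE
  //   CHECKING -> WOULD_PREEMPT_DESCHEDULED      (a stub got descheduled)
  //   PREEMPTING <-> WOULD_PREEMPT_DESCHEDULED   (stub scheduling changed)
  //   WOULD_PREEMPT_DESCHEDULED -> IDLE          (caught up on pending IPCs)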

  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel> gpu_channel,
      scoped_refptr<gpu::SyncPointManager> manager,
      int32 routing_id,
      bool retire,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync point
    // with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel) {
      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        if (retire) {
          GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
          gpu_channel->OnMessageReceived(message);
        }
        return;
      } else {
        gpu_channel->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
  // passed through - therefore the WeakPtr assumptions are respected.
  base::WeakPtr<GpuChannel> gpu_channel_;
  IPC::Sender* sender_;
  scoped_refptr<gpu::SyncPointManager> sync_point_manager_;
  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software,
                       bool allow_future_sync_points)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox
                           ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox)
                           : gpu::gles2::MailboxManager::Create()),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      currently_processing_message_(NULL),
      num_stubs_descheduled_(0),
      allow_future_sync_points_(allow_future_sync_points),
      weak_factory_(this) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const base::CommandLine* command_line =
      base::CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);

  subscription_ref_set_ = new gpu::gles2::SubscriptionRefSet();
  subscription_ref_set_->AddObserver(this);
}

GpuChannel::~GpuChannel() {
  STLDeleteElements(&deferred_messages_);
  subscription_ref_set_->RemoveObserver(this);
  if (preempting_flag_.get())
    preempting_flag_->Reset();

  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
}

void GpuChannel::Init(base::SingleThreadTaskRunner* io_task_runner,
                      base::WaitableEvent* shutdown_event,
                      IPC::AttachmentBroker* broker) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_ =
      IPC::SyncChannel::Create(channel_id_, IPC::Channel::MODE_SERVER, this,
                               io_task_runner, false, shutdown_event, broker);

  filter_ = new GpuChannelMessageFilter(
      weak_factory_.GetWeakPtr(), gpu_channel_manager_->sync_point_manager(),
      base::ThreadTaskRunnerHandle::Get(), allow_future_sync_points_);
  io_task_runner_ = io_task_runner;
  channel_->AddFilter(filter_.get());
  pending_valuebuffer_state_ = new gpu::ValueStateMap();

  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, base::ThreadTaskRunnerHandle::Get());
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
base::ScopedFD GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return base::ScopedFD();
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    // Move Wait commands to the head of the queue, so the renderer
    // doesn't have to wait any longer than necessary.
    deferred_messages_.push_front(new IPC::Message(message));
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

void GpuChannel::OnAddSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_AddSubscription(client_id_, target));
}

void GpuChannel::OnRemoveSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_RemoveSubscription(client_id_, target));
}

void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::ThreadTaskRunnerHandle::Get()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}
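
// Called on the main thread whenever a stub's scheduling state flips. Tracks
// how many stubs are descheduled and, when that count moves between zero and
// non-zero, notifies the IO-thread filter so preemption can account for it.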
void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_, a_stub_is_descheduled));
    }
  }
}

CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               subscription_ref_set_.get(),
                               pending_valuebuffer_state_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }
  stubs_.AddWithID(stub.release(), route_id);
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}
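
// Lazily creates this channel's preemption flag. The IO-thread filter does
// the actual preemption bookkeeping, so the new flag and the current stub
// scheduling state are handed to it via a posted task.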
gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder,
                                    OnCreateJpegDecoder)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}
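
// Drains at most one message from the deferred queue per invocation. If the
// target stub is unscheduled the queue is left alone; if it is merely
// preempted, or if messages remain afterwards, OnScheduled() posts another
// HandleMessage task so processing resumes later.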
void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  IPC::Message* m = NULL;
  GpuCommandBufferStub* stub = NULL;

  m = deferred_messages_.front();
  stub = stubs_.Lookup(m->routing_id());
  if (stub) {
    if (!stub->IsScheduled())
      return;
    if (stub->IsPreempted()) {
      OnScheduled();
      return;
    }
  }

  scoped_ptr<IPC::Message> message(m);
  deferred_messages_.pop_front();
  bool message_processed = true;

  currently_processing_message_ = message.get();
  bool result;
  if (message->routing_id() == MSG_ROUTING_CONTROL)
    result = OnControlMessageReceived(*message);
  else
    result = router_.RouteMessage(*message);
  currently_processing_message_ = NULL;

  if (!result) {
    // Respond to sync messages even if router failed to route.
    if (message->is_sync()) {
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
      reply->set_reply_error();
      Send(reply);
    }
  } else {
    // If the command buffer becomes unscheduled as a result of handling the
    // message but still has more commands to process, synthesize an IPC
    // message to flush that command buffer.
    if (stub) {
      if (stub->HasUnprocessedCommands()) {
        deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
            stub->route_id()));
        message_processed = false;
      }
    }
  }
  if (message_processed)
    MessageProcessed();

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      subscription_ref_set_.get(),
      pending_valuebuffer_state_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      route_id,
      0,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }
  stubs_.AddWithID(stub.release(), route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = (stub && !stub->IsScheduled());
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from the
  // stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}
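
// Lazily creates the channel-wide JPEG decode accelerator on first use, then
// registers the new client route. GpuMsg_CreateJpegDecoder is a delay-reply
// message, so |reply_msg| is answered once the client has been set up.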
void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) {
  if (!jpeg_decoder_) {
    jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_));
  }
  jpeg_decoder_->AddClient(route_id, reply_msg);
}

void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_task_runner_->PostTask(
        FROM_HERE, base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                              filter_, messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

uint64 GpuChannel::GetMemoryUsage() {
  uint64 size = 0;
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    size += it.GetCurrentValue()->GetMemoryUsage();
  }
  return size;
}

scoped_refptr<gfx::GLImage> GpuChannel::CreateImageForGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    gfx::GpuMemoryBuffer::Format format,
    uint32 internalformat) {
  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      scoped_refptr<gfx::GLImageSharedMemory> image(
          new gfx::GLImageSharedMemory(size, internalformat));
      if (!image->Initialize(handle, format))
        return scoped_refptr<gfx::GLImage>();

      return image;
    }
    default: {
      GpuChannelManager* manager = gpu_channel_manager();
      if (!manager->gpu_memory_buffer_factory())
        return scoped_refptr<gfx::GLImage>();

      return manager->gpu_memory_buffer_factory()
          ->AsImageFactory()
          ->CreateImageForGpuMemoryBuffer(handle,
                                          size,
                                          format,
                                          internalformat,
                                          client_id_);
    }
  }
}

void GpuChannel::HandleUpdateValueState(
    unsigned int target, const gpu::ValueState& state) {
  pending_valuebuffer_state_->UpdateState(target, state);
}
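
// base::trace_event::MemoryDumpProvider implementation. Reports this
// channel's total GL memory under a "gl/<channel id>" allocator dump, with
// '.' replaced by '_' so the channel id forms a valid dump name component.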
bool GpuChannel::OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd) {
  auto dump_name = GetChannelName();
  std::replace(dump_name.begin(), dump_name.end(), '.', '_');

  base::trace_event::MemoryAllocatorDump* dump =
      pmd->CreateAllocatorDump(base::StringPrintf("gl/%s", dump_name.c_str()));

  dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                  base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                  GetMemoryUsage());

  return true;
}

}  // namespace content