We have started redesigning the GpuMemoryBuffer interface to handle multiple buffers [0].
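As a rough sketch of what handling multiple buffers could look like for a caller (our illustration, not code from this change): each of several GpuMemoryBufferHandles gets its own GLImage via the per-handle entry point defined at the bottom of this file.

// Illustrative only: |channel|, |handles|, and |images| are hypothetical;
// CreateImageForGpuMemoryBuffer() is the real per-handle entry point below.
for (const gfx::GpuMemoryBufferHandle& handle : handles) {
  scoped_refptr<gfx::GLImage> image = channel->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return false;  // Creation failed for this buffer.
  images.push_back(image);
}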
content/common/gpu/gpu_channel.cc
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(OS_WIN)
#include <windows.h>
#endif

#include "content/common/gpu/gpu_channel.h"

#include <queue>
#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/timer/timer.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_memory_buffer_factory.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/value_state.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ipc/ipc_channel.h"
#include "ipc/message_filter.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image_shared_memory.h"
#include "ui/gl/gl_surface.h"

#if defined(OS_POSIX)
#include "ipc/ipc_channel_posix.h"
#endif

namespace content {
namespace {

// Number of milliseconds between successive vsync. Many GL commands block
// on vsync, so thresholds for preemption should be multiples of this.
const int64 kVsyncIntervalMs = 17;

// Amount of time that we will wait for an IPC to be processed before
// preempting. After a preemption, we must wait this long before triggering
// another preemption.
const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;

// Once we trigger a preemption, the maximum duration that we will wait
// before clearing the preemption.
const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;

// Stop the preemption once the time for the longest pending IPC drops
// below this threshold.
const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
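
// Worked example with the values above: an IPC that is still unprocessed
// 2 * 17 = 34 ms (kPreemptWaitTimeMs) after it arrived triggers preemption.
// Preemption is then held for at most 17 ms (kMaxPreemptTimeMs), and is
// released earlier if the oldest pending IPC has been waiting for less than
// 17 ms (kStopPreemptThresholdMs).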
}  // anonymous namespace

// This filter does three things:
// - it counts and timestamps each message forwarded to the channel
//   so that we can preempt other channels if a message takes too long to
//   process. To guarantee fairness, we must wait a minimum amount of time
//   before preempting and we limit the amount of time that we can preempt in
//   one shot (see constants above).
// - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
//   thread, generating the sync point ID and responding immediately, and then
//   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
//   into the channel's queue.
// - it generates mailbox names for clients of the GPU process on the IO thread.
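//
// For example, the InsertSyncPoint round trip proceeds as follows:
//   1. The renderer sends the synchronous GpuCommandBufferMsg_InsertSyncPoint.
//   2. On the IO thread, this filter generates the sync point ID and sends
//      the reply immediately, without waiting for the main thread.
//   3. It then posts InsertSyncPointOnMainThread(), which associates the sync
//      point with the stub, or retires it at once if the stub is gone.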
class GpuChannelMessageFilter : public IPC::MessageFilter {
 public:
  GpuChannelMessageFilter(
      base::WeakPtr<GpuChannel> gpu_channel,
      scoped_refptr<gpu::SyncPointManager> sync_point_manager,
      scoped_refptr<base::MessageLoopProxy> message_loop,
      bool future_sync_points)
      : preemption_state_(IDLE),
        gpu_channel_(gpu_channel),
        sender_(NULL),
        sync_point_manager_(sync_point_manager),
        message_loop_(message_loop),
        messages_forwarded_to_channel_(0),
        a_stub_is_descheduled_(false),
        future_sync_points_(future_sync_points) {}

  void OnFilterAdded(IPC::Sender* sender) override {
    DCHECK(!sender_);
    sender_ = sender;
  }

  void OnFilterRemoved() override {
    DCHECK(sender_);
    sender_ = NULL;
  }

  bool OnMessageReceived(const IPC::Message& message) override {
    DCHECK(sender_);

    bool handled = false;
    if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
        !future_sync_points_) {
      DLOG(ERROR) << "Untrusted client should not send "
                     "GpuCommandBufferMsg_RetireSyncPoint message";
      return true;
    }

    if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
      Tuple<bool> retire;
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
      if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                              &retire)) {
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      if (!future_sync_points_ && !get<0>(retire)) {
        LOG(ERROR) << "Untrusted contexts can't create future sync points";
        reply->set_reply_error();
        Send(reply);
        return true;
      }
      uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
      GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
      Send(reply);
      message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
                     gpu_channel_,
                     sync_point_manager_,
                     message.routing_id(),
                     get<0>(retire),
                     sync_point));
      handled = true;
    }

    // All other messages get processed by the GpuChannel.
    messages_forwarded_to_channel_++;
    if (preempting_flag_.get())
      pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
    UpdatePreemptionState();

    return handled;
  }

  void MessageProcessed(uint64 messages_processed) {
    while (!pending_messages_.empty() &&
           pending_messages_.front().message_number <= messages_processed)
      pending_messages_.pop();
    UpdatePreemptionState();
  }

  void SetPreemptingFlagAndSchedulingState(
      gpu::PreemptionFlag* preempting_flag,
      bool a_stub_is_descheduled) {
    preempting_flag_ = preempting_flag;
    a_stub_is_descheduled_ = a_stub_is_descheduled;
  }

  void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
    a_stub_is_descheduled_ = a_stub_is_descheduled;
    UpdatePreemptionState();
  }

  bool Send(IPC::Message* message) {
    return sender_->Send(message);
  }

 protected:
  ~GpuChannelMessageFilter() override {}

 private:
  enum PreemptionState {
    // Either there's no other channel to preempt, there are no messages
    // pending processing, or we just finished preempting and have to wait
    // before preempting again.
    IDLE,
    // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
    WAITING,
    // We can preempt whenever any IPC processing takes more than
    // kPreemptWaitTimeMs.
    CHECKING,
    // We are currently preempting (i.e. no stub is descheduled).
    PREEMPTING,
    // We would like to preempt, but some stub is descheduled.
    WOULD_PREEMPT_DESCHEDULED,
  };
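
  // Transitions, all driven through UpdatePreemptionState() and the
  // TransitionTo*() helpers below:
  //   IDLE -> WAITING              a message becomes pending.
  //   WAITING -> CHECKING          after kPreemptWaitTimeMs.
  //   CHECKING -> PREEMPTING       the oldest pending IPC is older than
  //                                kPreemptWaitTimeMs and no stub is
  //                                descheduled.
  //   CHECKING -> WOULD_PREEMPT_DESCHEDULED    same, but a stub is
  //                                            descheduled.
  //   PREEMPTING <-> WOULD_PREEMPT_DESCHEDULED as stubs are descheduled and
  //                                            rescheduled.
  //   PREEMPTING -> IDLE           the queue has caught up, or the
  //                                max_preemption_time_ timer fired.
  //   WOULD_PREEMPT_DESCHEDULED -> IDLE        the queue has caught up.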
  PreemptionState preemption_state_;

  // Maximum amount of time that we can spend in PREEMPTING.
  // It is reset when we transition to IDLE.
  base::TimeDelta max_preemption_time_;

  struct PendingMessage {
    uint64 message_number;
    base::TimeTicks time_received;

    explicit PendingMessage(uint64 message_number)
        : message_number(message_number),
          time_received(base::TimeTicks::Now()) {
    }
  };

  void UpdatePreemptionState() {
    switch (preemption_state_) {
      case IDLE:
        if (preempting_flag_.get() && !pending_messages_.empty())
          TransitionToWaiting();
        break;
      case WAITING:
        // A timer will transition us to CHECKING.
        DCHECK(timer_.IsRunning());
        break;
      case CHECKING:
        if (!pending_messages_.empty()) {
          base::TimeDelta time_elapsed =
              base::TimeTicks::Now() - pending_messages_.front().time_received;
          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
            // Schedule another check for when the IPC may go long.
            timer_.Start(
                FROM_HERE,
                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
                    time_elapsed,
                this, &GpuChannelMessageFilter::UpdatePreemptionState);
          } else {
            if (a_stub_is_descheduled_)
              TransitionToWouldPreemptDescheduled();
            else
              TransitionToPreempting();
          }
        }
        break;
      case PREEMPTING:
        // A TransitionToIdle() timer should always be running in this state.
        DCHECK(timer_.IsRunning());
        if (a_stub_is_descheduled_)
          TransitionToWouldPreemptDescheduled();
        else
          TransitionToIdleIfCaughtUp();
        break;
      case WOULD_PREEMPT_DESCHEDULED:
        // A TransitionToIdle() timer should never be running in this state.
        DCHECK(!timer_.IsRunning());
        if (!a_stub_is_descheduled_)
          TransitionToPreempting();
        else
          TransitionToIdleIfCaughtUp();
        break;
      default:
        NOTREACHED();
    }
  }

  void TransitionToIdleIfCaughtUp() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    if (pending_messages_.empty()) {
      TransitionToIdle();
    } else {
      base::TimeDelta time_elapsed =
          base::TimeTicks::Now() - pending_messages_.front().time_received;
      if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
        TransitionToIdle();
    }
  }

  void TransitionToIdle() {
    DCHECK(preemption_state_ == PREEMPTING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
    timer_.Stop();

    preemption_state_ = IDLE;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  void TransitionToWaiting() {
    DCHECK_EQ(preemption_state_, IDLE);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = WAITING;
    timer_.Start(
        FROM_HERE,
        base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs),
        this, &GpuChannelMessageFilter::TransitionToChecking);
  }

  void TransitionToChecking() {
    DCHECK_EQ(preemption_state_, WAITING);
    DCHECK(!timer_.IsRunning());

    preemption_state_ = CHECKING;
    max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs);
    UpdatePreemptionState();
  }

  void TransitionToPreempting() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
    DCHECK(!a_stub_is_descheduled_);

    // Stop any pending state update checks that we may have queued
    // while CHECKING.
    if (preemption_state_ == CHECKING)
      timer_.Stop();

    preemption_state_ = PREEMPTING;
    preempting_flag_->Set();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1);

    timer_.Start(
        FROM_HERE,
        max_preemption_time_,
        this, &GpuChannelMessageFilter::TransitionToIdle);

    UpdatePreemptionState();
  }

  void TransitionToWouldPreemptDescheduled() {
    DCHECK(preemption_state_ == CHECKING ||
           preemption_state_ == PREEMPTING);
    DCHECK(a_stub_is_descheduled_);

    if (preemption_state_ == CHECKING) {
      // Stop any pending state update checks that we may have queued
      // while CHECKING.
      timer_.Stop();
    } else {
      // Stop any TransitionToIdle() timers that we may have queued
      // while PREEMPTING.
      timer_.Stop();
      max_preemption_time_ = timer_.desired_run_time() - base::TimeTicks::Now();
      if (max_preemption_time_ < base::TimeDelta()) {
        TransitionToIdle();
        return;
      }
    }

    preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
    preempting_flag_->Reset();
    TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

    UpdatePreemptionState();
  }

  static void InsertSyncPointOnMainThread(
      base::WeakPtr<GpuChannel> gpu_channel,
      scoped_refptr<gpu::SyncPointManager> manager,
      int32 routing_id,
      bool retire,
      uint32 sync_point) {
    // This function must ensure that the sync point will be retired. Normally
    // we'll find the stub based on the routing ID, and associate the sync
    // point with it, but if that fails for any reason (channel or stub already
    // deleted, invalid routing id), we need to retire the sync point
    // immediately.
    if (gpu_channel) {
      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
      if (stub) {
        stub->AddSyncPoint(sync_point);
        if (retire) {
          GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
          gpu_channel->OnMessageReceived(message);
        }
        return;
      } else {
        gpu_channel->MessageProcessed();
      }
    }
    manager->RetireSyncPoint(sync_point);
  }

  // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
  // passed through - therefore the WeakPtr assumptions are respected.
  base::WeakPtr<GpuChannel> gpu_channel_;
  IPC::Sender* sender_;
  scoped_refptr<gpu::SyncPointManager> sync_point_manager_;
  scoped_refptr<base::MessageLoopProxy> message_loop_;
  scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

  std::queue<PendingMessage> pending_messages_;

  // Count of the number of IPCs forwarded to the GpuChannel.
  uint64 messages_forwarded_to_channel_;

  base::OneShotTimer<GpuChannelMessageFilter> timer_;

  bool a_stub_is_descheduled_;

  // True if this channel can create future sync points.
  bool future_sync_points_;
};

GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                       GpuWatchdog* watchdog,
                       gfx::GLShareGroup* share_group,
                       gpu::gles2::MailboxManager* mailbox,
                       int client_id,
                       bool software,
                       bool allow_future_sync_points)
    : gpu_channel_manager_(gpu_channel_manager),
      messages_processed_(0),
      client_id_(client_id),
      share_group_(share_group ? share_group : new gfx::GLShareGroup),
      mailbox_manager_(mailbox ? mailbox : new gpu::gles2::MailboxManagerImpl),
      watchdog_(watchdog),
      software_(software),
      handle_messages_scheduled_(false),
      currently_processing_message_(NULL),
      num_stubs_descheduled_(0),
      allow_future_sync_points_(allow_future_sync_points),
      weak_factory_(this) {
  DCHECK(gpu_channel_manager);
  DCHECK(client_id);

  channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
  const base::CommandLine* command_line =
      base::CommandLine::ForCurrentProcess();
  log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);

  subscription_ref_set_ = new gpu::gles2::SubscriptionRefSet();
  subscription_ref_set_->AddObserver(this);
}

GpuChannel::~GpuChannel() {
  STLDeleteElements(&deferred_messages_);
  subscription_ref_set_->RemoveObserver(this);
  if (preempting_flag_.get())
    preempting_flag_->Reset();
}

void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                      base::WaitableEvent* shutdown_event) {
  DCHECK(!channel_.get());

  // Map renderer ID to a (single) channel to that process.
  channel_ = IPC::SyncChannel::Create(channel_id_,
                                      IPC::Channel::MODE_SERVER,
                                      this,
                                      io_message_loop,
                                      false,
                                      shutdown_event);

  filter_ =
      new GpuChannelMessageFilter(weak_factory_.GetWeakPtr(),
                                  gpu_channel_manager_->sync_point_manager(),
                                  base::MessageLoopProxy::current(),
                                  allow_future_sync_points_);
  io_message_loop_ = io_message_loop;
  channel_->AddFilter(filter_.get());
  pending_valuebuffer_state_ = new gpu::ValueStateMap();
}

std::string GpuChannel::GetChannelName() {
  return channel_id_;
}

#if defined(OS_POSIX)
base::ScopedFD GpuChannel::TakeRendererFileDescriptor() {
  if (!channel_) {
    NOTREACHED();
    return base::ScopedFD();
  }
  return channel_->TakeClientFileDescriptor();
}
#endif  // defined(OS_POSIX)

bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
  if (log_messages_) {
    DVLOG(1) << "received message @" << &message << " on channel @" << this
             << " with type " << message.type();
  }

  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    // Move Wait commands to the head of the queue, so the renderer
    // doesn't have to wait any longer than necessary.
    deferred_messages_.push_front(new IPC::Message(message));
  } else {
    deferred_messages_.push_back(new IPC::Message(message));
  }

  OnScheduled();

  return true;
}
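
// For example, if deferred_messages_ already holds two AsyncFlush messages
// and a WaitForTokenInRange arrives, the queue becomes
// [WaitForTokenInRange, AsyncFlush, AsyncFlush]: the renderer's blocking
// wait is answered before the remaining flushes are handled.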

void GpuChannel::OnChannelError() {
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::Send(IPC::Message* message) {
  // The GPU process must never send a synchronous IPC message to the renderer
  // process. This could result in deadlock.
  DCHECK(!message->is_sync());
  if (log_messages_) {
    DVLOG(1) << "sending message @" << message << " on channel @" << this
             << " with type " << message->type();
  }

  if (!channel_) {
    delete message;
    return false;
  }

  return channel_->Send(message);
}

void GpuChannel::OnAddSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_AddSubscription(client_id_, target));
}

void GpuChannel::OnRemoveSubscription(unsigned int target) {
  gpu_channel_manager()->Send(
      new GpuHostMsg_RemoveSubscription(client_id_, target));
}

void GpuChannel::RequeueMessage() {
  DCHECK(currently_processing_message_);
  deferred_messages_.push_front(
      new IPC::Message(*currently_processing_message_));
  messages_processed_--;
  currently_processing_message_ = NULL;
}

void GpuChannel::OnScheduled() {
  if (handle_messages_scheduled_)
    return;
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleMessage. HandleMessage is invoked as a
  // task to prevent reentrancy.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  handle_messages_scheduled_ = true;
}

void GpuChannel::StubSchedulingChanged(bool scheduled) {
  bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
  if (scheduled) {
    num_stubs_descheduled_--;
    OnScheduled();
  } else {
    num_stubs_descheduled_++;
  }
  DCHECK_LE(num_stubs_descheduled_, stubs_.size());
  bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

  if (a_stub_is_descheduled != a_stub_was_descheduled) {
    if (preempting_flag_.get()) {
      io_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuChannelMessageFilter::UpdateStubSchedulingState,
                     filter_,
                     a_stub_is_descheduled));
    }
  }
}

CreateCommandBufferResult GpuChannel::CreateViewCommandBuffer(
    const gfx::GLSurfaceHandle& window,
    int32 surface_id,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id) {
  TRACE_EVENT1("gpu",
               "GpuChannel::CreateViewCommandBuffer",
               "surface_id",
               surface_id);

  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  // Virtualize compositor contexts on OS X to prevent performance regressions
  // when enabling FCM.
  // http://crbug.com/180463
  bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
  use_virtualized_gl_context = true;
#endif

  scoped_ptr<GpuCommandBufferStub> stub(
      new GpuCommandBufferStub(this,
                               share_group,
                               window,
                               mailbox_manager_.get(),
                               subscription_ref_set_.get(),
                               pending_valuebuffer_state_.get(),
                               gfx::Size(),
                               disallowed_features_,
                               init_params.attribs,
                               init_params.gpu_preference,
                               use_virtualized_gl_context,
                               route_id,
                               surface_id,
                               watchdog_,
                               software_,
                               init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::CreateViewCommandBuffer(): "
                   "failed to add route";
    return CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
  }
  stubs_.AddWithID(stub.release(), route_id);
  return CREATE_COMMAND_BUFFER_SUCCEEDED;
}

GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32 route_id) {
  return stubs_.Lookup(route_id);
}

void GpuChannel::LoseAllContexts() {
  gpu_channel_manager_->LoseAllContexts();
}

void GpuChannel::MarkAllContextsLost() {
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->MarkContextLost();
  }
}

bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
  return router_.AddRoute(route_id, listener);
}

void GpuChannel::RemoveRoute(int32 route_id) {
  router_.RemoveRoute(route_id);
}

gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
  if (!preempting_flag_.get()) {
    preempting_flag_ = new gpu::PreemptionFlag;
    io_message_loop_->PostTask(
        FROM_HERE, base::Bind(
            &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
            filter_, preempting_flag_, num_stubs_descheduled_ > 0));
  }
  return preempting_flag_.get();
}
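
// Note the pairing implied by the names: preempting_flag_ is set by this
// channel's filter while it is preempting, while preempted_flag_ (set below)
// makes this channel's own stubs yield. Wiring two channels together would
// look roughly like this (illustrative; the actual hookup lives in callers
// outside this file):
//   low_priority_channel->SetPreemptByFlag(
//       high_priority_channel->GetPreemptionFlag());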

void GpuChannel::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
  preempted_flag_ = preempted_flag;

  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
  }
}

void GpuChannel::OnDestroy() {
  TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
  gpu_channel_manager_->RemoveChannel(client_id_);
}

bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                        OnCreateOffscreenCommandBuffer)
    IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer,
                        OnDestroyCommandBuffer)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled) << msg.type();
  return handled;
}

void GpuChannel::HandleMessage() {
  handle_messages_scheduled_ = false;
  if (deferred_messages_.empty())
    return;

  IPC::Message* m = NULL;
  GpuCommandBufferStub* stub = NULL;

  m = deferred_messages_.front();
  stub = stubs_.Lookup(m->routing_id());
  if (stub) {
    if (!stub->IsScheduled())
      return;
    if (stub->IsPreempted()) {
      OnScheduled();
      return;
    }
  }

  scoped_ptr<IPC::Message> message(m);
  deferred_messages_.pop_front();
  bool message_processed = true;

  currently_processing_message_ = message.get();
  bool result;
  if (message->routing_id() == MSG_ROUTING_CONTROL)
    result = OnControlMessageReceived(*message);
  else
    result = router_.RouteMessage(*message);
  currently_processing_message_ = NULL;

  if (!result) {
    // Respond to sync messages even if router failed to route.
    if (message->is_sync()) {
      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
      reply->set_reply_error();
      Send(reply);
    }
  } else {
    // If the command buffer becomes unscheduled as a result of handling the
    // message but still has more commands to process, synthesize an IPC
    // message to flush that command buffer.
    if (stub) {
      if (stub->HasUnprocessedCommands()) {
        deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
            stub->route_id()));
        message_processed = false;
      }
    }
  }
  if (message_processed)
    MessageProcessed();

  if (!deferred_messages_.empty()) {
    OnScheduled();
  }
}
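
// For example, if handling an AsyncFlush deschedules the stub before all of
// its commands are consumed, a GpuCommandBufferMsg_Rescheduled is pushed to
// the front of the queue and message_processed stays false; once the stub is
// scheduled again, HandleMessage() picks that message up first, so the flush
// resumes ahead of any newer messages.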

void GpuChannel::OnCreateOffscreenCommandBuffer(
    const gfx::Size& size,
    const GPUCreateCommandBufferConfig& init_params,
    int32 route_id,
    bool* succeeded) {
  TRACE_EVENT0("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer");
  GpuCommandBufferStub* share_group = stubs_.Lookup(init_params.share_group_id);

  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
      this,
      share_group,
      gfx::GLSurfaceHandle(),
      mailbox_manager_.get(),
      subscription_ref_set_.get(),
      pending_valuebuffer_state_.get(),
      size,
      disallowed_features_,
      init_params.attribs,
      init_params.gpu_preference,
      false,
      route_id,
      0,
      watchdog_,
      software_,
      init_params.active_url));
  if (preempted_flag_.get())
    stub->SetPreemptByFlag(preempted_flag_);
  if (!router_.AddRoute(route_id, stub.get())) {
    DLOG(ERROR) << "GpuChannel::OnCreateOffscreenCommandBuffer(): "
                   "failed to add route";
    *succeeded = false;
    return;
  }
  stubs_.AddWithID(stub.release(), route_id);
  TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer",
               "route_id", route_id);
  *succeeded = true;
}

void GpuChannel::OnDestroyCommandBuffer(int32 route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  GpuCommandBufferStub* stub = stubs_.Lookup(route_id);
  if (!stub)
    return;
  bool need_reschedule = (stub && !stub->IsScheduled());
  router_.RemoveRoute(route_id);
  stubs_.Remove(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from
  // the stub, we need to make sure to reschedule the GpuChannel here.
  if (need_reschedule) {
    // This stub won't get a chance to reschedule, so update the count now.
    StubSchedulingChanged(true);
  }
}

void GpuChannel::MessageProcessed() {
  messages_processed_++;
  if (preempting_flag_.get()) {
    io_message_loop_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannelMessageFilter::MessageProcessed,
                   filter_,
                   messages_processed_));
  }
}

void GpuChannel::CacheShader(const std::string& key,
                             const std::string& shader) {
  gpu_channel_manager_->Send(
      new GpuHostMsg_CacheShader(client_id_, key, shader));
}

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  channel_->AddFilter(filter);
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  channel_->RemoveFilter(filter);
}

uint64 GpuChannel::GetMemoryUsage() {
  uint64 size = 0;
  for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
       !it.IsAtEnd(); it.Advance()) {
    size += it.GetCurrentValue()->GetMemoryUsage();
  }
  return size;
}

scoped_refptr<gfx::GLImage> GpuChannel::CreateImageForGpuMemoryBuffer(
    const gfx::GpuMemoryBufferHandle& handle,
    const gfx::Size& size,
    gfx::GpuMemoryBuffer::Format format,
    uint32 internalformat) {
  switch (handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      scoped_refptr<gfx::GLImageSharedMemory> image(
          new gfx::GLImageSharedMemory(size, internalformat));
      if (!image->Initialize(handle, format))
        return scoped_refptr<gfx::GLImage>();

      return image;
    }
    default: {
      GpuChannelManager* manager = gpu_channel_manager();
      if (!manager->gpu_memory_buffer_factory())
        return scoped_refptr<gfx::GLImage>();

      return manager->gpu_memory_buffer_factory()
          ->AsImageFactory()
          ->CreateImageForGpuMemoryBuffer(handle,
                                          size,
                                          format,
                                          internalformat,
                                          client_id_);
    }
  }
}

void GpuChannel::HandleUpdateValueState(
    unsigned int target, const gpu::ValueState& state) {
  pending_valuebuffer_state_->UpdateState(target, state);
}

}  // namespace content