Update UnusedResources lint suppressions.
[chromium-blink-merge.git] / content / common / gpu / gpu_command_buffer_stub.cc
blob81a6e2c224fd9385cb605e7ea7b779b655662dc1
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/bind.h"
6 #include "base/bind_helpers.h"
7 #include "base/command_line.h"
8 #include "base/hash.h"
9 #include "base/json/json_writer.h"
10 #include "base/memory/shared_memory.h"
11 #include "base/time/time.h"
12 #include "base/trace_event/trace_event.h"
13 #include "build/build_config.h"
14 #include "content/common/gpu/gpu_channel.h"
15 #include "content/common/gpu/gpu_channel_manager.h"
16 #include "content/common/gpu/gpu_command_buffer_stub.h"
17 #include "content/common/gpu/gpu_memory_manager.h"
18 #include "content/common/gpu/gpu_memory_tracking.h"
19 #include "content/common/gpu/gpu_messages.h"
20 #include "content/common/gpu/gpu_watchdog.h"
21 #include "content/common/gpu/image_transport_surface.h"
22 #include "content/common/gpu/media/gpu_video_decode_accelerator.h"
23 #include "content/common/gpu/media/gpu_video_encode_accelerator.h"
24 #include "content/public/common/content_client.h"
25 #include "content/public/common/content_switches.h"
26 #include "gpu/command_buffer/common/constants.h"
27 #include "gpu/command_buffer/common/gles2_cmd_utils.h"
28 #include "gpu/command_buffer/common/mailbox.h"
29 #include "gpu/command_buffer/service/gl_context_virtual.h"
30 #include "gpu/command_buffer/service/gl_state_restorer_impl.h"
31 #include "gpu/command_buffer/service/image_factory.h"
32 #include "gpu/command_buffer/service/image_manager.h"
33 #include "gpu/command_buffer/service/logger.h"
34 #include "gpu/command_buffer/service/mailbox_manager.h"
35 #include "gpu/command_buffer/service/memory_tracking.h"
36 #include "gpu/command_buffer/service/query_manager.h"
37 #include "gpu/command_buffer/service/sync_point_manager.h"
38 #include "gpu/command_buffer/service/valuebuffer_manager.h"
39 #include "ui/gl/gl_bindings.h"
40 #include "ui/gl/gl_switches.h"
42 #if defined(OS_WIN)
43 #include "base/win/win_util.h"
44 #include "content/public/common/sandbox_init.h"
45 #endif
47 #if defined(OS_ANDROID)
48 #include "content/common/gpu/stream_texture_android.h"
49 #endif
51 namespace content {
52 struct WaitForCommandState {
53 WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
54 : start(start), end(end), reply(reply) {}
56 int32 start;
57 int32 end;
58 scoped_ptr<IPC::Message> reply;
61 namespace {
63 // The GpuCommandBufferMemoryTracker class provides a bridge between the
64 // ContextGroup's memory type managers and the GpuMemoryManager class.
65 class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
66 public:
67 explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
68 tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
69 CreateTrackingGroup(channel->renderer_pid(), this)) {
72 void TrackMemoryAllocatedChange(
73 size_t old_size,
74 size_t new_size,
75 gpu::gles2::MemoryTracker::Pool pool) override {
76 tracking_group_->TrackMemoryAllocatedChange(
77 old_size, new_size, pool);
80 bool EnsureGPUMemoryAvailable(size_t size_needed) override {
81 return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
84 private:
85 ~GpuCommandBufferMemoryTracker() override {}
86 scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;
88 DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
91 // FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
92 // url_hash matches.
93 void FastSetActiveURL(const GURL& url, size_t url_hash) {
94 // Leave the previously set URL in the empty case -- empty URLs are given by
95 // BlinkPlatformImpl::createOffscreenGraphicsContext3D. Hopefully the
96 // onscreen context URL was set previously and will show up even when a crash
97 // occurs during offscreen command processing.
98 if (url.is_empty())
99 return;
100 static size_t g_last_url_hash = 0;
101 if (url_hash != g_last_url_hash) {
102 g_last_url_hash = url_hash;
103 GetContentClient()->SetActiveURL(url);
107 // The first time polling a fence, delay some extra time to allow other
108 // stubs to process some work, or else the timing of the fences could
109 // allow a pattern of alternating fast and slow frames to occur.
110 const int64 kHandleMoreWorkPeriodMs = 2;
111 const int64 kHandleMoreWorkPeriodBusyMs = 1;
113 // Prevents idle work from being starved.
114 const int64 kMaxTimeSinceIdleMs = 10;
116 class DevToolsChannelData : public base::trace_event::ConvertableToTraceFormat {
117 public:
118 static scoped_refptr<base::trace_event::ConvertableToTraceFormat>
119 CreateForChannel(GpuChannel* channel);
121 void AppendAsTraceFormat(std::string* out) const override {
122 std::string tmp;
123 base::JSONWriter::Write(*value_, &tmp);
124 *out += tmp;
127 private:
128 explicit DevToolsChannelData(base::Value* value) : value_(value) {}
129 ~DevToolsChannelData() override {}
130 scoped_ptr<base::Value> value_;
131 DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
134 scoped_refptr<base::trace_event::ConvertableToTraceFormat>
135 DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
136 scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
137 res->SetInteger("renderer_pid", channel->renderer_pid());
138 res->SetDouble("used_bytes", channel->GetMemoryUsage());
139 res->SetDouble("limit_bytes",
140 channel->gpu_channel_manager()
141 ->gpu_memory_manager()
142 ->GetMaximumClientAllocation());
143 return new DevToolsChannelData(res.release());
146 } // namespace
148 GpuCommandBufferStub::GpuCommandBufferStub(
149 GpuChannel* channel,
150 GpuCommandBufferStub* share_group,
151 const gfx::GLSurfaceHandle& handle,
152 gpu::gles2::MailboxManager* mailbox_manager,
153 gpu::gles2::SubscriptionRefSet* subscription_ref_set,
154 gpu::ValueStateMap* pending_valuebuffer_state,
155 const gfx::Size& size,
156 const gpu::gles2::DisallowedFeatures& disallowed_features,
157 const std::vector<int32>& attribs,
158 gfx::GpuPreference gpu_preference,
159 bool use_virtualized_gl_context,
160 int32 route_id,
161 int32 surface_id,
162 GpuWatchdog* watchdog,
163 bool software,
164 const GURL& active_url)
165 : channel_(channel),
166 handle_(handle),
167 initial_size_(size),
168 disallowed_features_(disallowed_features),
169 requested_attribs_(attribs),
170 gpu_preference_(gpu_preference),
171 use_virtualized_gl_context_(use_virtualized_gl_context),
172 route_id_(route_id),
173 surface_id_(surface_id),
174 software_(software),
175 last_flush_count_(0),
176 last_memory_allocation_valid_(false),
177 watchdog_(watchdog),
178 sync_point_wait_count_(0),
179 delayed_work_scheduled_(false),
180 previous_messages_processed_(0),
181 active_url_(active_url),
182 total_gpu_memory_(0) {
183 active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
184 FastSetActiveURL(active_url_, active_url_hash_);
186 gpu::gles2::ContextCreationAttribHelper attrib_parser;
187 attrib_parser.Parse(requested_attribs_);
189 if (share_group) {
190 context_group_ = share_group->context_group_;
191 DCHECK(context_group_->bind_generates_resource() ==
192 attrib_parser.bind_generates_resource);
193 } else {
194 context_group_ = new gpu::gles2::ContextGroup(
195 mailbox_manager,
196 new GpuCommandBufferMemoryTracker(channel),
197 channel_->gpu_channel_manager()->shader_translator_cache(),
198 NULL,
199 subscription_ref_set,
200 pending_valuebuffer_state,
201 attrib_parser.bind_generates_resource);
204 use_virtualized_gl_context_ |=
205 context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
207 bool is_offscreen = surface_id_ == 0;
208 if (is_offscreen && initial_size_.IsEmpty()) {
209 // If we're an offscreen surface with zero width and/or height, set to a
210 // non-zero size so that we have a complete framebuffer for operations like
211 // glClear.
212 initial_size_ = gfx::Size(1, 1);
216 GpuCommandBufferStub::~GpuCommandBufferStub() {
217 Destroy();
219 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
220 gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
223 GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
224 return channel()->gpu_channel_manager()->gpu_memory_manager();
227 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
228 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
229 "GPUTask",
230 "data",
231 DevToolsChannelData::CreateForChannel(channel()));
232 FastSetActiveURL(active_url_, active_url_hash_);
234 bool have_context = false;
235 // Ensure the appropriate GL context is current before handling any IPC
236 // messages directed at the command buffer. This ensures that the message
237 // handler can assume that the context is current (not necessary for
238 // RetireSyncPoint or WaitSyncPoint).
239 if (decoder_.get() &&
240 message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
241 message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
242 message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
243 message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
244 message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
245 message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
246 message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID &&
247 message.type() !=
248 GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback::ID) {
249 if (!MakeCurrent())
250 return false;
251 have_context = true;
254 // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
255 // here. This is so the reply can be delayed if the scheduler is unscheduled.
256 bool handled = true;
257 IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
258 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
259 OnInitialize);
260 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
261 OnSetGetBuffer);
262 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
263 OnProduceFrontBuffer);
264 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
265 OnWaitForTokenInRange);
266 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
267 OnWaitForGetOffsetInRange);
268 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
269 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
270 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
271 OnRegisterTransferBuffer);
272 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
273 OnDestroyTransferBuffer);
274 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
275 OnCreateVideoDecoder)
276 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
277 OnCreateVideoEncoder)
278 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
279 OnSetSurfaceVisible)
280 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
281 OnRetireSyncPoint)
282 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
283 OnSignalSyncPoint)
284 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
285 OnSignalQuery)
286 IPC_MESSAGE_HANDLER(
287 GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
288 OnSetClientHasMemoryAllocationChangedCallback)
289 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
290 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
291 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
292 OnCreateStreamTexture)
293 IPC_MESSAGE_UNHANDLED(handled = false)
294 IPC_END_MESSAGE_MAP()
296 CheckCompleteWaits();
298 if (have_context) {
299 // Ensure that any delayed work that was created will be handled.
300 ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
303 DCHECK(handled);
304 return handled;
307 bool GpuCommandBufferStub::Send(IPC::Message* message) {
308 return channel_->Send(message);
311 bool GpuCommandBufferStub::IsScheduled() {
312 return (!scheduler_.get() || scheduler_->IsScheduled());
315 bool GpuCommandBufferStub::HasMoreWork() {
316 return scheduler_.get() && scheduler_->HasMoreWork();
319 void GpuCommandBufferStub::PollWork() {
320 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
321 delayed_work_scheduled_ = false;
322 FastSetActiveURL(active_url_, active_url_hash_);
323 if (decoder_.get() && !MakeCurrent())
324 return;
326 if (scheduler_) {
327 uint64 current_messages_processed =
328 channel()->gpu_channel_manager()->MessagesProcessed();
329 // We're idle when no messages were processed or scheduled.
330 bool is_idle =
331 (previous_messages_processed_ == current_messages_processed) &&
332 !channel()->gpu_channel_manager()->HandleMessagesScheduled();
333 if (!is_idle && !last_idle_time_.is_null()) {
334 base::TimeDelta time_since_idle =
335 base::TimeTicks::Now() - last_idle_time_;
336 base::TimeDelta max_time_since_idle =
337 base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);
339 // Force idle when it's been too long since last time we were idle.
340 if (time_since_idle > max_time_since_idle)
341 is_idle = true;
344 if (is_idle) {
345 last_idle_time_ = base::TimeTicks::Now();
346 scheduler_->PerformIdleWork();
349 ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
352 bool GpuCommandBufferStub::HasUnprocessedCommands() {
353 if (command_buffer_) {
354 gpu::CommandBuffer::State state = command_buffer_->GetLastState();
355 return command_buffer_->GetPutOffset() != state.get_offset &&
356 !gpu::error::IsError(state.error);
358 return false;
361 void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
362 if (!HasMoreWork()) {
363 last_idle_time_ = base::TimeTicks();
364 return;
367 if (delayed_work_scheduled_)
368 return;
369 delayed_work_scheduled_ = true;
371 // Idle when no messages are processed between now and when
372 // PollWork is called.
373 previous_messages_processed_ =
374 channel()->gpu_channel_manager()->MessagesProcessed();
375 if (last_idle_time_.is_null())
376 last_idle_time_ = base::TimeTicks::Now();
378 // IsScheduled() returns true after passing all unschedule fences
379 // and this is when we can start performing idle work. Idle work
380 // is done synchronously so we can set delay to 0 and instead poll
381 // for more work at the rate idle work is performed. This also ensures
382 // that idle work is done as efficiently as possible without any
383 // unnecessary delays.
384 if (scheduler_.get() &&
385 scheduler_->IsScheduled() &&
386 scheduler_->HasMoreIdleWork()) {
387 delay = 0;
390 base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
391 FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
392 base::TimeDelta::FromMilliseconds(delay));
395 bool GpuCommandBufferStub::MakeCurrent() {
396 if (decoder_->MakeCurrent())
397 return true;
398 DLOG(ERROR) << "Context lost because MakeCurrent failed.";
399 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
400 command_buffer_->SetParseError(gpu::error::kLostContext);
401 CheckContextLost();
402 return false;
405 void GpuCommandBufferStub::Destroy() {
406 if (wait_for_token_) {
407 Send(wait_for_token_->reply.release());
408 wait_for_token_.reset();
410 if (wait_for_get_offset_) {
411 Send(wait_for_get_offset_->reply.release());
412 wait_for_get_offset_.reset();
414 if (handle_.is_null() && !active_url_.is_empty()) {
415 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
416 gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
417 active_url_));
420 memory_manager_client_state_.reset();
422 while (!sync_points_.empty())
423 OnRetireSyncPoint(sync_points_.front());
425 if (decoder_)
426 decoder_->set_engine(NULL);
428 // The scheduler has raw references to the decoder and the command buffer so
429 // destroy it before those.
430 scheduler_.reset();
432 bool have_context = false;
433 if (decoder_ && decoder_->GetGLContext()) {
434 // Try to make the context current regardless of whether it was lost, so we
435 // don't leak resources.
436 have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
438 FOR_EACH_OBSERVER(DestructionObserver,
439 destruction_observers_,
440 OnWillDestroyStub());
442 if (decoder_) {
443 decoder_->Destroy(have_context);
444 decoder_.reset();
447 command_buffer_.reset();
449 // Remove this after crbug.com/248395 is sorted out.
450 surface_ = NULL;
453 void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
454 Destroy();
455 GpuCommandBufferMsg_Initialize::WriteReplyParams(
456 reply_message, false, gpu::Capabilities());
457 Send(reply_message);
460 void GpuCommandBufferStub::OnInitialize(
461 base::SharedMemoryHandle shared_state_handle,
462 IPC::Message* reply_message) {
463 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
464 DCHECK(!command_buffer_.get());
466 scoped_ptr<base::SharedMemory> shared_state_shm(
467 new base::SharedMemory(shared_state_handle, false));
469 command_buffer_.reset(new gpu::CommandBufferService(
470 context_group_->transfer_buffer_manager()));
472 bool result = command_buffer_->Initialize();
473 DCHECK(result);
475 decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));
476 scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
477 decoder_.get(),
478 decoder_.get()));
479 if (preemption_flag_.get())
480 scheduler_->SetPreemptByFlag(preemption_flag_);
482 decoder_->set_engine(scheduler_.get());
484 if (!handle_.is_null()) {
485 #if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
486 if (software_) {
487 LOG(ERROR) << "No software support.";
488 OnInitializeFailed(reply_message);
489 return;
491 #endif
493 surface_ = ImageTransportSurface::CreateSurface(
494 channel_->gpu_channel_manager(),
495 this,
496 handle_);
497 } else {
498 GpuChannelManager* manager = channel_->gpu_channel_manager();
499 surface_ = manager->GetDefaultOffscreenSurface();
502 if (!surface_.get()) {
503 DLOG(ERROR) << "Failed to create surface.";
504 OnInitializeFailed(reply_message);
505 return;
508 scoped_refptr<gfx::GLContext> context;
509 if (use_virtualized_gl_context_ && channel_->share_group()) {
510 context = channel_->share_group()->GetSharedContext();
511 if (!context.get()) {
512 context = gfx::GLContext::CreateGLContext(
513 channel_->share_group(),
514 channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
515 gpu_preference_);
516 if (!context.get()) {
517 DLOG(ERROR) << "Failed to create shared context for virtualization.";
518 OnInitializeFailed(reply_message);
519 return;
521 channel_->share_group()->SetSharedContext(context.get());
523 // This should be a non-virtual GL context.
524 DCHECK(context->GetHandle());
525 context = new gpu::GLContextVirtual(
526 channel_->share_group(), context.get(), decoder_->AsWeakPtr());
527 if (!context->Initialize(surface_.get(), gpu_preference_)) {
528 // TODO(sievers): The real context created above for the default
529 // offscreen surface might not be compatible with this surface.
530 // Need to adjust at least GLX to be able to create the initial context
531 // with a config that is compatible with onscreen and offscreen surfaces.
532 context = NULL;
534 DLOG(ERROR) << "Failed to initialize virtual GL context.";
535 OnInitializeFailed(reply_message);
536 return;
539 if (!context.get()) {
540 context = gfx::GLContext::CreateGLContext(
541 channel_->share_group(), surface_.get(), gpu_preference_);
543 if (!context.get()) {
544 DLOG(ERROR) << "Failed to create context.";
545 OnInitializeFailed(reply_message);
546 return;
549 if (!context->MakeCurrent(surface_.get())) {
550 LOG(ERROR) << "Failed to make context current.";
551 OnInitializeFailed(reply_message);
552 return;
555 if (!context->GetGLStateRestorer()) {
556 context->SetGLStateRestorer(
557 new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
560 if (!context->GetTotalGpuMemory(&total_gpu_memory_))
561 total_gpu_memory_ = 0;
563 if (!context_group_->has_program_cache()) {
564 context_group_->set_program_cache(
565 channel_->gpu_channel_manager()->program_cache());
568 // Initialize the decoder with either the view or pbuffer GLContext.
569 if (!decoder_->Initialize(surface_,
570 context,
571 !surface_id(),
572 initial_size_,
573 disallowed_features_,
574 requested_attribs_)) {
575 DLOG(ERROR) << "Failed to initialize decoder.";
576 OnInitializeFailed(reply_message);
577 return;
580 if (base::CommandLine::ForCurrentProcess()->HasSwitch(
581 switches::kEnableGPUServiceLogging)) {
582 decoder_->set_log_commands(true);
585 decoder_->GetLogger()->SetMsgCallback(
586 base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
587 base::Unretained(this)));
588 decoder_->SetShaderCacheCallback(
589 base::Bind(&GpuCommandBufferStub::SendCachedShader,
590 base::Unretained(this)));
591 decoder_->SetWaitSyncPointCallback(
592 base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
593 base::Unretained(this)));
595 command_buffer_->SetPutOffsetChangeCallback(
596 base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
597 command_buffer_->SetGetBufferChangeCallback(
598 base::Bind(&gpu::GpuScheduler::SetGetBuffer,
599 base::Unretained(scheduler_.get())));
600 command_buffer_->SetParseErrorCallback(
601 base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
602 scheduler_->SetSchedulingChangedCallback(
603 base::Bind(&GpuChannel::StubSchedulingChanged,
604 base::Unretained(channel_)));
606 if (watchdog_) {
607 scheduler_->SetCommandProcessedCallback(
608 base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
609 base::Unretained(this)));
612 const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
613 if (!shared_state_shm->Map(kSharedStateSize)) {
614 DLOG(ERROR) << "Failed to map shared state buffer.";
615 OnInitializeFailed(reply_message);
616 return;
618 command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
619 shared_state_shm.Pass(), kSharedStateSize));
621 gpu::Capabilities capabilities = decoder_->GetCapabilities();
622 capabilities.future_sync_points = channel_->allow_future_sync_points();
624 GpuCommandBufferMsg_Initialize::WriteReplyParams(
625 reply_message, true, capabilities);
626 Send(reply_message);
628 if (handle_.is_null() && !active_url_.is_empty()) {
629 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
630 gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
631 active_url_));
635 void GpuCommandBufferStub::OnCreateStreamTexture(
636 uint32 texture_id, int32 stream_id, bool* succeeded) {
637 #if defined(OS_ANDROID)
638 *succeeded = StreamTexture::Create(this, texture_id, stream_id);
639 #else
640 *succeeded = false;
641 #endif
644 void GpuCommandBufferStub::SetLatencyInfoCallback(
645 const LatencyInfoCallback& callback) {
646 latency_info_callback_ = callback;
649 int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
650 // The command buffer is pairs of enum, value
651 // search for the requested attribute, return the value.
652 for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
653 it != requested_attribs_.end(); ++it) {
654 if (*it++ == attr) {
655 return *it;
658 return -1;
661 void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
662 IPC::Message* reply_message) {
663 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
664 if (command_buffer_)
665 command_buffer_->SetGetBuffer(shm_id);
666 Send(reply_message);
669 void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
670 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
671 if (!decoder_) {
672 LOG(ERROR) << "Can't produce front buffer before initialization.";
673 return;
676 decoder_->ProduceFrontBuffer(mailbox);
679 void GpuCommandBufferStub::OnParseError() {
680 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
681 DCHECK(command_buffer_.get());
682 gpu::CommandBuffer::State state = command_buffer_->GetLastState();
683 IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
684 route_id_, state.context_lost_reason, state.error);
685 msg->set_unblock(true);
686 Send(msg);
688 // Tell the browser about this context loss as well, so it can
689 // determine whether client APIs like WebGL need to be immediately
690 // blocked from automatically running.
691 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
692 gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
693 handle_.is_null(), state.context_lost_reason, active_url_));
695 CheckContextLost();
698 void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
699 int32 end,
700 IPC::Message* reply_message) {
701 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
702 DCHECK(command_buffer_.get());
703 CheckContextLost();
704 if (wait_for_token_)
705 LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
706 wait_for_token_ =
707 make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
708 CheckCompleteWaits();
711 void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
712 int32 start,
713 int32 end,
714 IPC::Message* reply_message) {
715 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
716 DCHECK(command_buffer_.get());
717 CheckContextLost();
718 if (wait_for_get_offset_) {
719 LOG(ERROR)
720 << "Got WaitForGetOffset command while currently waiting for offset.";
722 wait_for_get_offset_ =
723 make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
724 CheckCompleteWaits();
727 void GpuCommandBufferStub::CheckCompleteWaits() {
728 if (wait_for_token_ || wait_for_get_offset_) {
729 gpu::CommandBuffer::State state = command_buffer_->GetLastState();
730 if (wait_for_token_ &&
731 (gpu::CommandBuffer::InRange(
732 wait_for_token_->start, wait_for_token_->end, state.token) ||
733 state.error != gpu::error::kNoError)) {
734 ReportState();
735 GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
736 wait_for_token_->reply.get(), state);
737 Send(wait_for_token_->reply.release());
738 wait_for_token_.reset();
740 if (wait_for_get_offset_ &&
741 (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
742 wait_for_get_offset_->end,
743 state.get_offset) ||
744 state.error != gpu::error::kNoError)) {
745 ReportState();
746 GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
747 wait_for_get_offset_->reply.get(), state);
748 Send(wait_for_get_offset_->reply.release());
749 wait_for_get_offset_.reset();
754 void GpuCommandBufferStub::OnAsyncFlush(
755 int32 put_offset,
756 uint32 flush_count,
757 const std::vector<ui::LatencyInfo>& latency_info) {
758 TRACE_EVENT1(
759 "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);
761 if (ui::LatencyInfo::Verify(latency_info,
762 "GpuCommandBufferStub::OnAsyncFlush") &&
763 !latency_info_callback_.is_null()) {
764 latency_info_callback_.Run(latency_info);
766 DCHECK(command_buffer_.get());
767 if (flush_count - last_flush_count_ < 0x8000000U) {
768 last_flush_count_ = flush_count;
769 command_buffer_->Flush(put_offset);
770 } else {
771 // We received this message out-of-order. This should not happen but is here
772 // to catch regressions. Ignore the message.
773 NOTREACHED() << "Received a Flush message out-of-order";
776 ReportState();
779 void GpuCommandBufferStub::OnRescheduled() {
780 gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
781 command_buffer_->Flush(command_buffer_->GetPutOffset());
782 gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();
784 if (pre_state.get_offset != post_state.get_offset)
785 ReportState();
788 void GpuCommandBufferStub::OnRegisterTransferBuffer(
789 int32 id,
790 base::SharedMemoryHandle transfer_buffer,
791 uint32 size) {
792 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
794 // Take ownership of the memory and map it into this process.
795 // This validates the size.
796 scoped_ptr<base::SharedMemory> shared_memory(
797 new base::SharedMemory(transfer_buffer, false));
798 if (!shared_memory->Map(size)) {
799 DVLOG(0) << "Failed to map shared memory.";
800 return;
803 if (command_buffer_) {
804 command_buffer_->RegisterTransferBuffer(
805 id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
809 void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
810 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");
812 if (command_buffer_)
813 command_buffer_->DestroyTransferBuffer(id);
816 void GpuCommandBufferStub::OnCommandProcessed() {
817 if (watchdog_)
818 watchdog_->CheckArmed();
821 void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }
823 void GpuCommandBufferStub::PutChanged() {
824 FastSetActiveURL(active_url_, active_url_hash_);
825 scheduler_->PutChanged();
828 void GpuCommandBufferStub::OnCreateVideoDecoder(
829 media::VideoCodecProfile profile,
830 int32 decoder_route_id,
831 IPC::Message* reply_message) {
832 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
833 GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
834 decoder_route_id, this, channel_->io_task_runner());
835 decoder->Initialize(profile, reply_message);
836 // decoder is registered as a DestructionObserver of this stub and will
837 // self-delete during destruction of this stub.
840 void GpuCommandBufferStub::OnCreateVideoEncoder(
841 media::VideoFrame::Format input_format,
842 const gfx::Size& input_visible_size,
843 media::VideoCodecProfile output_profile,
844 uint32 initial_bitrate,
845 int32 encoder_route_id,
846 IPC::Message* reply_message) {
847 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
848 GpuVideoEncodeAccelerator* encoder =
849 new GpuVideoEncodeAccelerator(encoder_route_id, this);
850 encoder->Initialize(input_format,
851 input_visible_size,
852 output_profile,
853 initial_bitrate,
854 reply_message);
855 // encoder is registered as a DestructionObserver of this stub and will
856 // self-delete during destruction of this stub.
859 void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
860 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
861 if (memory_manager_client_state_)
862 memory_manager_client_state_->SetVisible(visible);
865 void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
866 sync_points_.push_back(sync_point);
869 void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
870 DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
871 sync_points_.pop_front();
872 GpuChannelManager* manager = channel_->gpu_channel_manager();
873 manager->sync_point_manager()->RetireSyncPoint(sync_point);
876 bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
877 if (!sync_point)
878 return true;
879 GpuChannelManager* manager = channel_->gpu_channel_manager();
880 if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
881 return true;
883 if (sync_point_wait_count_ == 0) {
884 TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
885 "GpuCommandBufferStub", this);
887 scheduler_->SetScheduled(false);
888 ++sync_point_wait_count_;
889 manager->sync_point_manager()->AddSyncPointCallback(
890 sync_point,
891 base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
892 this->AsWeakPtr()));
893 return scheduler_->IsScheduled();
896 void GpuCommandBufferStub::OnSyncPointRetired() {
897 --sync_point_wait_count_;
898 if (sync_point_wait_count_ == 0) {
899 TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
900 "GpuCommandBufferStub", this);
902 scheduler_->SetScheduled(true);
905 void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
906 GpuChannelManager* manager = channel_->gpu_channel_manager();
907 manager->sync_point_manager()->AddSyncPointCallback(
908 sync_point,
909 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
910 this->AsWeakPtr(),
911 id));
914 void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
915 Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
918 void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
919 if (decoder_) {
920 gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
921 if (query_manager) {
922 gpu::gles2::QueryManager::Query* query =
923 query_manager->GetQuery(query_id);
924 if (query) {
925 query->AddCallback(
926 base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
927 this->AsWeakPtr(),
928 id));
929 return;
933 // Something went wrong, run callback immediately.
934 OnSignalSyncPointAck(id);
938 void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
939 bool has_callback) {
940 TRACE_EVENT0(
941 "gpu",
942 "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
943 if (has_callback) {
944 if (!memory_manager_client_state_) {
945 memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
946 this, surface_id_ != 0, true));
948 } else {
949 memory_manager_client_state_.reset();
// Creates a GLImage backed by the given GpuMemoryBuffer and registers it
// with the decoder's image manager under |id|. Each failure mode logs and
// returns without registering anything; the client observes failure by the
// image never becoming available.
void GpuCommandBufferStub::OnCreateImage(int32 id,
                                         gfx::GpuMemoryBufferHandle handle,
                                         gfx::Size size,
                                         gfx::GpuMemoryBuffer::Format format,
                                         uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");

  // Without a decoder there is no image manager to register into.
  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  // IDs are client-chosen; reject duplicates rather than clobbering.
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  // Validate the request: buffer format supported by this context's
  // capabilities, size valid for the format, and GL internal format
  // compatible with the buffer format.
  if (!gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
          format, decoder_->GetCapabilities())) {
    LOG(ERROR) << "Format is not supported.";
    return;
  }

  if (!gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(size,
                                                                   format)) {
    LOG(ERROR) << "Invalid image size for format.";
    return;
  }

  if (!gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
          internalformat, format)) {
    LOG(ERROR) << "Incompatible image format.";
    return;
  }

  // The channel owns image creation; a null result means creation failed.
  scoped_refptr<gfx::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return;

  image_manager->AddImage(image.get(), id);
}
996 void GpuCommandBufferStub::OnDestroyImage(int32 id) {
997 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");
999 if (!decoder_)
1000 return;
1002 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
1003 DCHECK(image_manager);
1004 if (!image_manager->LookupImage(id)) {
1005 LOG(ERROR) << "Image with ID doesn't exist.";
1006 return;
1009 image_manager->RemoveImage(id);
1012 void GpuCommandBufferStub::SendConsoleMessage(
1013 int32 id,
1014 const std::string& message) {
1015 GPUCommandBufferConsoleMessage console_message;
1016 console_message.id = id;
1017 console_message.message = message;
1018 IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
1019 route_id_, console_message);
1020 msg->set_unblock(true);
1021 Send(msg);
// Forwards a compiled-shader cache entry to the owning channel, which
// handles persisting it.
void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}
// Registers an observer to be notified when this stub is destroyed.
void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}
// Unregisters a previously added destruction observer.
void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}
// Stores the preemption flag and, if the scheduler already exists, passes
// it along so command processing can be preempted. The flag is kept in
// preemption_flag_ so a later-created scheduler can pick it up too.
void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}
1046 bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
1047 *bytes = total_gpu_memory_;
1048 return !!total_gpu_memory_;
1051 gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
1052 if (!surface_.get())
1053 return gfx::Size();
1054 return surface_->GetSize();
// Returns the feature info of the context group this stub belongs to.
const gpu::gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const {
  return context_group_->feature_info();
}
// Returns the memory tracker of the context group this stub belongs to.
gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}
1065 void GpuCommandBufferStub::SetMemoryAllocation(
1066 const gpu::MemoryAllocation& allocation) {
1067 if (!last_memory_allocation_valid_ ||
1068 !allocation.Equals(last_memory_allocation_)) {
1069 Send(new GpuCommandBufferMsg_SetMemoryAllocation(
1070 route_id_, allocation));
1073 last_memory_allocation_valid_ = true;
1074 last_memory_allocation_ = allocation;
1077 void GpuCommandBufferStub::SuggestHaveFrontBuffer(
1078 bool suggest_have_frontbuffer) {
1079 // This can be called outside of OnMessageReceived, so the context needs
1080 // to be made current before calling methods on the surface.
1081 if (surface_.get() && MakeCurrent())
1082 surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
// Returns whether the context has been lost, applying loss-recovery policy
// as a side effect: possibly exiting the GPU process (driver workaround)
// or losing all contexts on the channel (robustness reset).
bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;

  // Work around issues with recovery by allowing a new GPU process to launch.
  // Skipped in single-process / in-process-GPU modes, where exiting would
  // take down the whole browser.
  if (was_lost &&
      context_group_->feature_info()->workarounds().exit_on_context_lost &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kSingleProcess) &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kInProcessGPU)) {
    LOG(ERROR) << "Exiting GPU process because some drivers cannot recover"
               << " from problems.";
#if defined(OS_WIN)
    // Deliberate exit; don't treat process detach as a crash on Windows.
    base::win::SetShouldCrashOnProcessDetach(false);
#endif
    exit(0);
  }

  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();

  // Waiters must be released even (especially) when the context is lost.
  CheckCompleteWaits();
  return was_lost;
}
// Forcibly marks this context as lost (with reason kUnknown). No-op if the
// command buffer is gone or the context is already lost.
void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  // Set the loss reason before raising the parse error so clients observing
  // the error also see a valid reason.
  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->MarkContextLost(gpu::error::kUnknown);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}
// Returns this client's current GPU memory usage as reported by the memory
// manager.
uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}
// Notifies the client that a swap completed, forwarding the latency info
// collected for the frame and the swap result.
void GpuCommandBufferStub::SendSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info,
                                                    result));
}
// Forwards updated vsync timing (timebase and interval) to the client.
void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                     interval));
}
1142 } // namespace content