Android: Enable 'crash GPU process on context lost' WAR for Mali-400
[chromium-blink-merge.git] / content/common/gpu/gpu_command_buffer_stub.cc
blob bd57c41f19d2c76b3a6d8d4e369c0267680dd497
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/memory/shared_memory.h"
// Added: this file uses base::ThreadTaskRunnerHandle directly (see
// ScheduleDelayedWork), so include its header rather than relying on a
// transitive include.
#include "base/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_memory_manager.h"
#include "content/common/gpu/gpu_memory_tracking.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "content/common/gpu/image_transport_surface.h"
#include "content/common/gpu/media/gpu_video_decode_accelerator.h"
#include "content/common/gpu/media/gpu_video_encode_accelerator.h"
#include "content/public/common/content_client.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

#if defined(OS_ANDROID)
#include "content/common/gpu/stream_texture_android.h"
#endif

namespace content {
struct WaitForCommandState {
  WaitForCommandState(int32 start, int32 end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32 start;
  int32 end;
  scoped_ptr<IPC::Message> reply;
};

namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel) :
      tracking_group_(channel->gpu_channel_manager()->gpu_memory_manager()->
          CreateTrackingGroup(channel->renderer_pid(), this)) {
  }

  void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) override {
    tracking_group_->TrackMemoryAllocatedChange(
        old_size, new_size, pool);
  }

  bool EnsureGPUMemoryAvailable(size_t size_needed) override {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

 private:
  ~GpuCommandBufferMemoryTracker() override {}
  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // BlinkPlatformImpl::createOffscreenGraphicsContext3D. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
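  // The last-seen hash is cached in a function-local static, so repeated calls
  // for the same URL skip SetActiveURL entirely; note this shortcut is only
  // safe if all callers run on a single (GPU main) thread.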
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    GetContentClient()->SetActiveURL(url);
  }
}

// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64 kHandleMoreWorkPeriodMs = 2;
const int64 kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64 kMaxTimeSinceIdleMs = 10;

class DevToolsChannelData : public base::trace_event::ConvertableToTraceFormat {
 public:
  static scoped_refptr<base::trace_event::ConvertableToTraceFormat>
  CreateForChannel(GpuChannel* channel);

  void AppendAsTraceFormat(std::string* out) const override {
    std::string tmp;
    base::JSONWriter::Write(*value_, &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  ~DevToolsChannelData() override {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->renderer_pid());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  res->SetDouble("limit_bytes",
                 channel->gpu_channel_manager()
                     ->gpu_memory_manager()
                     ->GetMaximumClientAllocation());
  return new DevToolsChannelData(res.release());
}

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    GpuCommandBufferStub* share_group,
    const gfx::GLSurfaceHandle& handle,
    gpu::gles2::MailboxManager* mailbox_manager,
    gpu::gles2::SubscriptionRefSet* subscription_ref_set,
    gpu::ValueStateMap* pending_valuebuffer_state,
    const gfx::Size& size,
    const gpu::gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    bool use_virtualized_gl_context,
    int32 route_id,
    int32 surface_id,
    GpuWatchdog* watchdog,
    bool software,
    const GURL& active_url)
    : channel_(channel),
      handle_(handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(use_virtualized_gl_context),
      route_id_(route_id),
      surface_id_(surface_id),
      software_(software),
      last_flush_count_(0),
      last_memory_allocation_valid_(false),
      watchdog_(watchdog),
      sync_point_wait_count_(0),
      delayed_work_scheduled_(false),
      previous_messages_processed_(0),
      active_url_(active_url),
      total_gpu_memory_(0) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_);

  gpu::gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gpu::gles2::ContextGroup(
        mailbox_manager,
        new GpuCommandBufferMemoryTracker(channel),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        NULL,
        subscription_ref_set,
        pending_valuebuffer_state,
        attrib_parser.bind_generates_resource);
  }

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;
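  // Note: use_virtualized_gl_context_ may now be true even when the client did
  // not request virtualization, because per-driver bug workarounds can force
  // it on.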

  bool is_offscreen = surface_id_ == 0;
  if (is_offscreen && initial_size_.IsEmpty()) {
    // If we're an offscreen surface with zero width and/or height, set to a
    // non-zero size so that we have a complete framebuffer for operations like
    // glClear.
    initial_size_ = gfx::Size(1, 1);
  }
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(surface_id()));
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
               "GPUTask",
               "data",
               DevToolsChannelData::CreateForChannel(channel()));
  FastSetActiveURL(active_url_, active_url_hash_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // RetireSyncPoint or WaitSyncPoint).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_RetireSyncPoint::ID &&
      message.type() != GpuCommandBufferMsg_SignalSyncPoint::ID &&
      message.type() !=
          GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Rescheduled, OnRescheduled);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoDecoder,
                                    OnCreateVideoDecoder)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateVideoEncoder,
                                    OnCreateVideoEncoder)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetSurfaceVisible,
                        OnSetSurfaceVisible)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RetireSyncPoint,
                        OnRetireSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncPoint,
                        OnSignalSyncPoint)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
                        OnSignalQuery)
    IPC_MESSAGE_HANDLER(
        GpuCommandBufferMsg_SetClientHasMemoryAllocationChangedCallback,
        OnSetClientHasMemoryAllocationChangedCallback)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  if (have_context) {
    // Ensure that any delayed work that was created will be handled.
    ScheduleDelayedWork(kHandleMoreWorkPeriodMs);
  }

  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!scheduler_.get() || scheduler_->IsScheduled());
}

bool GpuCommandBufferStub::HasMoreWork() {
  return scheduler_.get() && scheduler_->HasMoreWork();
}
319 TRACE_EVENT0("gpu", "GpuCommandBufferStub::PollWork");
320 delayed_work_scheduled_ = false;
321 FastSetActiveURL(active_url_, active_url_hash_);
322 if (decoder_.get() && !MakeCurrent())
323 return;
325 if (scheduler_) {
326 uint64 current_messages_processed =
327 channel()->gpu_channel_manager()->MessagesProcessed();
328 // We're idle when no messages were processed or scheduled.
329 bool is_idle =
330 (previous_messages_processed_ == current_messages_processed) &&
331 !channel()->gpu_channel_manager()->HandleMessagesScheduled();
332 if (!is_idle && !last_idle_time_.is_null()) {
333 base::TimeDelta time_since_idle =
334 base::TimeTicks::Now() - last_idle_time_;
335 base::TimeDelta max_time_since_idle =
336 base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);
338 // Force idle when it's been too long since last time we were idle.
339 if (time_since_idle > max_time_since_idle)
340 is_idle = true;
343 if (is_idle) {
344 last_idle_time_ = base::TimeTicks::Now();
345 scheduler_->PerformIdleWork();
348 ScheduleDelayedWork(kHandleMoreWorkPeriodBusyMs);
351 bool GpuCommandBufferStub::HasUnprocessedCommands() {
352 if (command_buffer_) {
353 gpu::CommandBuffer::State state = command_buffer_->GetLastState();
354 return command_buffer_->GetPutOffset() != state.get_offset &&
355 !gpu::error::IsError(state.error);
357 return false;

void GpuCommandBufferStub::ScheduleDelayedWork(int64 delay) {
  if (!HasMoreWork()) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  if (delayed_work_scheduled_)
    return;
  delayed_work_scheduled_ = true;

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_messages_processed_ =
      channel()->gpu_channel_manager()->MessagesProcessed();
  if (last_idle_time_.is_null())
    last_idle_time_ = base::TimeTicks::Now();

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (scheduler_.get() &&
      scheduler_->IsScheduled() &&
      scheduler_->HasMoreIdleWork()) {
    delay = 0;
  }

  base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
      FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      base::TimeDelta::FromMilliseconds(delay));
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }
  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidDestroyOffscreenContext(
        active_url_));
  }

  memory_manager_client_state_.reset();

  while (!sync_points_.empty())
    OnRetireSyncPoint(sync_points_.front());

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  scheduler_.reset();

  bool have_context = false;
  if (decoder_ && decoder_->GetGLContext()) {
    // Try to make the context current regardless of whether it was lost, so we
    // don't leak resources.
    have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
  }
  FOR_EACH_OBSERVER(DestructionObserver,
                    destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, false, gpu::Capabilities());
  Send(reply_message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(new gpu::CommandBufferService(
      context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group_.get()));

  if (!base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kSingleProcess) &&
      !base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kInProcessGPU)) {
    decoder_->SetAllowExit(true);
  }

  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                         decoder_.get(),
                                         decoder_.get()));
  if (preemption_flag_.get())
    scheduler_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(scheduler_.get());

  if (!handle_.is_null()) {
#if defined(OS_MACOSX) || defined(UI_COMPOSITOR_IMAGE_TRANSPORT)
    if (software_) {
      LOG(ERROR) << "No software support.";
      OnInitializeFailed(reply_message);
      return;
    }
#endif

    surface_ = ImageTransportSurface::CreateSurface(
        channel_->gpu_channel_manager(),
        this,
        handle_);
  } else {
    GpuChannelManager* manager = channel_->gpu_channel_manager();
    surface_ = manager->GetDefaultOffscreenSurface();
  }

  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
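  // With virtualized GL contexts, stubs in this channel's share group share a
  // single real GL context; each decoder is wrapped in a GLContextVirtual
  // that restores that decoder's GL state when it is made current.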
  if (use_virtualized_gl_context_ && channel_->share_group()) {
    context = channel_->share_group()->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context = new gpu::GLContextVirtual(
        channel_->share_group(), context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // TODO(sievers): The real context created above for the default
      // offscreen surface might not be compatible with this surface.
      // Need to adjust at least GLX to be able to create the initial context
      // with a config that is compatible with onscreen and offscreen surfaces.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(
        channel_->share_group(), surface_.get(), gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(
        new gpu::GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context->GetTotalGpuMemory(&total_gpu_memory_))
    total_gpu_memory_ = 0;

  if (!context_group_->has_program_cache()) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  if (!decoder_->Initialize(surface_,
                            context,
                            !surface_id(),
                            initial_size_,
                            disallowed_features_,
                            requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGPUServiceLogging)) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(
      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
                 base::Unretained(this)));
  decoder_->SetShaderCacheCallback(
      base::Bind(&GpuCommandBufferStub::SendCachedShader,
                 base::Unretained(this)));
  decoder_->SetWaitSyncPointCallback(
      base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
                 base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(
      base::Bind(&gpu::GpuScheduler::SetGetBuffer,
                 base::Unretained(scheduler_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  scheduler_->SetSchedulingChangedCallback(
      base::Bind(&GpuChannel::StubSchedulingChanged,
                 base::Unretained(channel_)));

  if (watchdog_) {
    scheduler_->SetCommandProcessedCallback(
        base::Bind(&GpuCommandBufferStub::OnCommandProcessed,
                   base::Unretained(this)));
  }

  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(gpu::MakeBackingFromSharedMemory(
      shared_state_shm.Pass(), kSharedStateSize));

  gpu::Capabilities capabilities = decoder_->GetCapabilities();
  capabilities.future_sync_points = channel_->allow_future_sync_points();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(
      reply_message, true, capabilities);
  Send(reply_message);

  if (handle_.is_null() && !active_url_.is_empty()) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(new GpuHostMsg_DidCreateOffscreenContext(
        active_url_));
  }
}

void GpuCommandBufferStub::OnCreateStreamTexture(
    uint32 texture_id, int32 stream_id, bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // requested_attribs_ is a flattened list of (enum, value) pairs. Search for
  // the requested attribute and return its value.
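  // Note that the inner *it++ steps past the attribute enum, so on a match
  // *it is the paired value; together with the loop's ++it the iterator
  // advances two slots per pair. This assumes a well-formed pair list.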
  for (std::vector<int32>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason, state.error);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DidLoseContext(
      handle_.is_null(), state.context_lost_reason, active_url_));

  CheckContextLost();
}

void GpuCommandBufferStub::OnWaitForTokenInRange(int32 start,
                                                 int32 end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32 start,
    int32 end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::CheckCompleteWaits() {
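  // A pending wait is answered either when the token/get offset enters the
  // requested range or when the command buffer has hit an error, so a client
  // blocked on a lost context is always released.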
  if (wait_for_token_ || wait_for_get_offset_) {
    gpu::CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (gpu::CommandBuffer::InRange(
             wait_for_token_->start, wait_for_token_->end, state.token) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (gpu::CommandBuffer::InRange(wait_for_get_offset_->start,
                                     wait_for_get_offset_->end,
                                     state.get_offset) ||
         state.error != gpu::error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}

void GpuCommandBufferStub::OnAsyncFlush(
    int32 put_offset,
    uint32 flush_count,
    const std::vector<ui::LatencyInfo>& latency_info) {
  TRACE_EVENT1(
      "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset);

  if (ui::LatencyInfo::Verify(latency_info,
                              "GpuCommandBufferStub::OnAsyncFlush") &&
      !latency_info_callback_.is_null()) {
    latency_info_callback_.Run(latency_info);
  }
  DCHECK(command_buffer_.get());
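  // flush_count is an unsigned counter that may wrap around; the subtraction
  // below is wraparound-safe and accepts any forward distance smaller than
  // 2^27 (0x8000000) as a newer flush, so stale or reordered flushes are
  // ignored.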
  if (flush_count - last_flush_count_ < 0x8000000U) {
    last_flush_count_ = flush_count;
    command_buffer_->Flush(put_offset);
  } else {
    // We received this message out-of-order. This should not happen but is here
    // to catch regressions. Ignore the message.
    NOTREACHED() << "Received a Flush message out-of-order";
  }

  ReportState();
}

void GpuCommandBufferStub::OnRescheduled() {
  gpu::CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(command_buffer_->GetPutOffset());
  gpu::CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32 id,
    base::SharedMemoryHandle transfer_buffer,
    uint32 size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, gpu::MakeBackingFromSharedMemory(shared_memory.Pass(), size));
  }
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_);
  scheduler_->PutChanged();
}

void GpuCommandBufferStub::OnCreateVideoDecoder(
    media::VideoCodecProfile profile,
    int32 decoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoDecoder");
  GpuVideoDecodeAccelerator* decoder = new GpuVideoDecodeAccelerator(
      decoder_route_id, this, channel_->io_task_runner());
  decoder->Initialize(profile, reply_message);
  // decoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnCreateVideoEncoder(
    media::VideoFrame::Format input_format,
    const gfx::Size& input_visible_size,
    media::VideoCodecProfile output_profile,
    uint32 initial_bitrate,
    int32 encoder_route_id,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateVideoEncoder");
  GpuVideoEncodeAccelerator* encoder =
      new GpuVideoEncodeAccelerator(encoder_route_id, this);
  encoder->Initialize(input_format,
                      input_visible_size,
                      output_profile,
                      initial_bitrate,
                      reply_message);
  // encoder is registered as a DestructionObserver of this stub and will
  // self-delete during destruction of this stub.
}

void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
  if (memory_manager_client_state_)
    memory_manager_client_state_->SetVisible(visible);
}

void GpuCommandBufferStub::AddSyncPoint(uint32 sync_point) {
  sync_points_.push_back(sync_point);
}

void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
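  // Sync points are retired strictly in the order they were added to this
  // stub, which the DCHECK below enforces.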
  DCHECK(!sync_points_.empty() && sync_points_.front() == sync_point);
  sync_points_.pop_front();
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->RetireSyncPoint(sync_point);
}

bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
  if (!sync_point)
    return true;
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  if (manager->sync_point_manager()->IsSyncPointRetired(sync_point))
    return true;
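
  // Deschedule this stub until the sync point is retired; OnSyncPointRetired
  // re-schedules it. The async trace event brackets only the outermost nested
  // wait (sync_point_wait_count_ going 0 -> 1 -> 0).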
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncPoint", this,
                             "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(false);
  ++sync_point_wait_count_;
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSyncPointRetired,
                 this->AsWeakPtr()));
  return scheduler_->IsScheduled();
}

void GpuCommandBufferStub::OnSyncPointRetired() {
  --sync_point_wait_count_;
  if (sync_point_wait_count_ == 0) {
    TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
                           "GpuCommandBufferStub", this);
  }
  scheduler_->SetScheduled(true);
}

void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->sync_point_manager()->AddSyncPointCallback(
      sync_point,
      base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                 this->AsWeakPtr(),
                 id));
}

void GpuCommandBufferStub::OnSignalSyncPointAck(uint32 id) {
  Send(new GpuCommandBufferMsg_SignalSyncPointAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
  if (decoder_) {
    gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gpu::gles2::QueryManager::Query* query =
          query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(
            base::Bind(&GpuCommandBufferStub::OnSignalSyncPointAck,
                       this->AsWeakPtr(),
                       id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalSyncPointAck(id);
}

void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
    bool has_callback) {
  TRACE_EVENT0(
      "gpu",
      "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
  if (has_callback) {
    if (!memory_manager_client_state_) {
      memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
          this, surface_id_ != 0, true));
    }
  } else {
    memory_manager_client_state_.reset();
  }
}

void GpuCommandBufferStub::OnCreateImage(int32 id,
                                         gfx::GpuMemoryBufferHandle handle,
                                         gfx::Size size,
                                         gfx::GpuMemoryBuffer::Format format,
                                         uint32 internalformat) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  if (!gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
          format, decoder_->GetCapabilities())) {
    LOG(ERROR) << "Format is not supported.";
    return;
  }

  if (!gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(size,
                                                                   format)) {
    LOG(ERROR) << "Invalid image size for format.";
    return;
  }

  if (!gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
          internalformat, format)) {
    LOG(ERROR) << "Incompatible image format.";
    return;
  }

  scoped_refptr<gfx::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return;

  image_manager->AddImage(image.get(), id);
}

void GpuCommandBufferStub::OnDestroyImage(int32 id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");

  if (!decoder_)
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

void GpuCommandBufferStub::SendConsoleMessage(
    int32 id,
    const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg(
      route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(
    const std::string& key, const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

void GpuCommandBufferStub::SetPreemptByFlag(
    scoped_refptr<gpu::PreemptionFlag> flag) {
  preemption_flag_ = flag;
  if (scheduler_)
    scheduler_->SetPreemptByFlag(preemption_flag_);
}

bool GpuCommandBufferStub::GetTotalGpuMemory(uint64* bytes) {
  *bytes = total_gpu_memory_;
  return !!total_gpu_memory_;
}

gfx::Size GpuCommandBufferStub::GetSurfaceSize() const {
  if (!surface_.get())
    return gfx::Size();
  return surface_->GetSize();
}

gpu::gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

void GpuCommandBufferStub::SetMemoryAllocation(
    const gpu::MemoryAllocation& allocation) {
  if (!last_memory_allocation_valid_ ||
      !allocation.Equals(last_memory_allocation_)) {
    Send(new GpuCommandBufferMsg_SetMemoryAllocation(
        route_id_, allocation));
  }

  last_memory_allocation_valid_ = true;
  last_memory_allocation_ = allocation;
}

void GpuCommandBufferStub::SuggestHaveFrontBuffer(
    bool suggest_have_frontbuffer) {
  // This can be called outside of OnMessageReceived, so the context needs
  // to be made current before calling methods on the surface.
  if (surface_.get() && MakeCurrent())
    surface_->SetFrontbufferAllocation(suggest_have_frontbuffer);
}

bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == gpu::error::kLostContext;
  // Lose all other contexts if the reset was triggered by the robustness
  // extension instead of being synthetic.
  if (was_lost && decoder_ && decoder_->WasContextLostByRobustnessExtension() &&
      (gfx::GLContext::LosesAllContextsOnContextLost() ||
       use_virtualized_gl_context_))
    channel_->LoseAllContexts();
  CheckCompleteWaits();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == gpu::error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(gpu::error::kUnknown);
  if (decoder_)
    decoder_->MarkContextLost(gpu::error::kUnknown);
  command_buffer_->SetParseError(gpu::error::kLostContext);
}

uint64 GpuCommandBufferStub::GetMemoryUsage() const {
  return GetMemoryManager()->GetClientMemoryUsage(this);
}

void GpuCommandBufferStub::SendSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info,
                                                    result));
}

void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                     interval));
}

}  // namespace content